input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
from unittest import main
from typing import Type
from cli_command_parser import Command, Context
from cli_command_parser.core import get_params
from cli_command_parser.exceptions import (
UsageError,
ParameterDefinitionError,
CommandDefinitionError,
ParamsMissing,
ParamConflict,
)
from cli_command_parser.parameters import ParamGroup, Flag, Positional, PassThru, SubCommand, Action, Option
from cli_command_parser.testing import ParserTest
class _GroupTest(ParserTest):
    """Base test case with a helper that runs the same parse cases against several commands."""

    def assert_cases_for_cmds(self, success_cases, fail_cases, *cmds, exc: Type[Exception] = None):
        """Assert that every command in ``cmds`` passes ``success_cases`` and fails ``fail_cases``.

        :param success_cases: iterable of (argv, expected_results) pairs
        :param fail_cases: iterable of failing argv lists (optionally with expected exception info)
        :param cmds: the Command subclasses to exercise
        :param exc: expected exception type for the fail cases, if uniform
        """
        for command in cmds:
            with self.subTest(cmd=command):
                self.assert_parse_results_cases(command, success_cases)
                self.assert_parse_fails_cases(command, fail_cases, exc)
class GroupTest(_GroupTest):
    """Tests of basic ParamGroup behavior: membership, registration, repr, and required groups."""

    def test_plain(self):
        # Members of a plain (non-exclusive, non-dependent) group may be combined freely.
        class Foo(Command):
            with ParamGroup() as group:
                bar = Flag('-b')
                baz = Flag('-B')

        success_cases = [
            ([], {'bar': False, 'baz': False}),
            (['-b'], {'bar': True, 'baz': False}),
            (['-B'], {'bar': False, 'baz': True}),
            (['-bB'], {'bar': True, 'baz': True}),
        ]
        self.assert_parse_results_cases(Foo, success_cases)

    def test_params_know_own_group(self):
        # Params declared inside the group's `with` block get `.group` set; others remain None.
        class Foo(Command):
            foo = Flag('-f')
            with ParamGroup(mutually_exclusive=True) as group:
                bar = Flag('-b')
                baz = Flag('-B')

        self.assertIs(Foo.foo.group, None)
        self.assertIs(Foo.bar.group, Foo.group)
        self.assertIs(Foo.baz.group, Foo.group)

    def test_reject_double_mutual(self):
        # A group cannot be both mutually dependent and mutually exclusive.
        with self.assertRaises(ParameterDefinitionError):
            ParamGroup(mutually_dependent=True, mutually_exclusive=True)

    def test_register_all(self):
        # register_all() adds already-declared params to a group after the fact.
        class Foo(Command):
            group = ParamGroup()
            foo = Flag()
            bar = Flag()
            group.register_all([foo, bar])

        self.assertTrue(all(p.group == Foo.group for p in (Foo.foo, Foo.bar)))
        self.assertEqual(2, len(list(Foo.group)))

    def test_repr(self):
        # The repr should include an abbreviated mutual-exclusivity marker.
        group = ParamGroup('foo', mutually_exclusive=True)
        self.assertIn('m.exclusive=T', repr(group))

    def test_description(self):
        # The formatted description of a mutually exclusive group mentions exclusivity.
        with Context():
            self.assertIn('exclusive', ParamGroup('foo', mutually_exclusive=True).formatter.format_description())

    def test_required_group(self):
        # A required group must have at least one member provided on the command line.
        class Foo1(Command):
            with ParamGroup(required=True) as group:
                bar = Flag('-b')
                baz = Flag('-B')

        class Foo2(Command):
            with ParamGroup(required=True) as group:
                bar = Flag('-b')
                baz = Flag('-B')

        success_cases = [
            (['-b'], {'bar': True, 'baz': False}),
            (['-B'], {'bar': False, 'baz': True}),
            (['-bB'], {'bar': True, 'baz': True}),
        ]
        fail_cases = [([], UsageError)]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2)

    def test_required_param_missing_from_non_required_group(self):
        # A required member still causes a failure even though its enclosing group is optional.
        class Foo(Command):
            with ParamGroup() as group:
                bar = Option('-b', required=True)
                baz = Flag('-B')

        self.assert_parse_fails(Foo, [], ParamsMissing)
class MutuallyExclusiveGroupTest(_GroupTest):
    """Tests of mutually exclusive groups: only one member may be provided at a time."""

    def test_mutually_exclusive(self):
        # Providing more than one member of a mutually exclusive group is a usage error.
        # Foo2 verifies the same behavior when the group is anonymous (no `as` binding).
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as group:
                bar = Flag('-b')
                baz = Flag('-B')

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                bar = Flag('-b', name='bar')
                baz = Flag('-B', name='baz')

        success_cases = [
            ([], {'bar': False, 'baz': False}),
            (['-b'], {'bar': True, 'baz': False}),
            (['-B'], {'bar': False, 'baz': True}),
        ]
        fail_cases = [(['-bB'], UsageError), (['-B', '-b'], UsageError, 'mutually exclusive - only one is allowed')]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2)

    def test_positional_nargs_qm(self):
        # An optional (nargs='?') Positional is allowed in a mutually exclusive group,
        # but may not be combined with another member.
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as group:
                foo = Positional(nargs='?')
                bar = Flag('-b')

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                foo = Positional(nargs='?')
                bar = Flag('-b')

        success_cases = [
            ([], {'foo': None, 'bar': False}),
            (['a'], {'foo': 'a', 'bar': False}),
            (['-b'], {'foo': None, 'bar': True}),
        ]
        fail_cases = [['-b', 'a'], ['a', '-b']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)

    def test_bad_members_rejected(self):
        # Params that must always be provided (required Positionals, PassThru, SubCommand,
        # Action, required Options) cannot be members of a mutually exclusive group.
        fail_cases = [
            (Positional, {}),
            (PassThru, {}),
            (SubCommand, {}),
            (Action, {}),
            (Option, {'required': True}),
        ]
        for param_cls, kwargs in fail_cases:
            with self.subTest(param_cls=param_cls, named=True), self.assertRaises(CommandDefinitionError):
                class Foo1(Command):
                    with ParamGroup(mutually_exclusive=True) as group:
                        foo = param_cls(**kwargs)
                        bar = Flag('-b')

            with self.subTest(param_cls=param_cls, named=False), self.assertRaises(CommandDefinitionError):
                class Foo2(Command):
                    with ParamGroup(mutually_exclusive=True):
                        foo = param_cls(**kwargs)
                        bar = Flag('-b')

    def test_me_and_plain_groups(self):
        # A plain group and a mutually exclusive group coexist independently in one command.
        class Foo1(Command):
            with ParamGroup() as a:
                foo = Flag('-f')
            with ParamGroup(mutually_exclusive=True) as b:
                bar = Flag('-b')
                baz = Flag('-B')

        class Foo2(Command):
            with ParamGroup():
                foo = Flag('-f')
            with ParamGroup(mutually_exclusive=True):
                bar = Flag('-b')
                baz = Flag('-B')

        success_cases = [
            ([], {'foo': False, 'bar': False, 'baz': False}),
            (['-b'], {'foo': False, 'bar': True, 'baz': False}),
            (['-B'], {'foo': False, 'bar': False, 'baz': True}),
            (['-f'], {'foo': True, 'bar': False, 'baz': False}),
            (['-fb'], {'foo': True, 'bar': True, 'baz': False}),
            (['-fB'], {'foo': True, 'bar': False, 'baz': True}),
        ]
        fail_cases = [['-bB'], ['-B', '-b'], ['-fbB'], ['-f', '-B', '-b']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
class MutuallyDependentGroupTest(_GroupTest):
    """Tests of mutually dependent groups: members must be provided together (or not at all)."""

    def test_mutually_dependent(self):
        # Providing only one member of a mutually dependent group is a usage error.
        class Foo1(Command):
            with ParamGroup(mutually_dependent=True) as group:
                bar = Flag('-b')
                baz = Flag('-B')

        class Foo2(Command):
            with ParamGroup(mutually_dependent=True):
                bar = Flag('-b')
                baz = Flag('-B')

        success_cases = [
            ([], {'bar': False, 'baz': False}),
            (['-bB'], {'bar': True, 'baz': True}),
            (['-b', '-B'], {'bar': True, 'baz': True}),
        ]
        fail_cases = [(['-b'], UsageError), (['-B'], UsageError)]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2)
class NestedGroupTest(_GroupTest):
    def test_nested_me_in_md(self):
        """A mutually exclusive group nested in a mutually dependent one: exactly one of
        foo/bar must be given together with baz."""
        class Foo1(Command):
            with ParamGroup(mutually_dependent=True) as outer:
                with ParamGroup(mutually_exclusive=True) as inner:
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        class Foo2(Command):
            with ParamGroup(mutually_dependent=True):
                with ParamGroup(mutually_exclusive=True):
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        # Membership: inner params belong to the inner group; the inner group (not its
        # params) belongs to the outer group.
        self.assertIn(Foo1.foo, Foo1.inner)
        self.assertIn(Foo1.bar, Foo1.inner)
        self.assertIn(Foo1.baz, Foo1.outer)
        self.assertNotIn(Foo1.foo, Foo1.outer)
        self.assertNotIn(Foo1.bar, Foo1.outer)
        self.assertNotIn(Foo1.baz, Foo1.inner)
        self.assertIn(Foo1.inner, Foo1.outer)

        success_cases = [
            (['--foo', '--baz'], {'foo': True, 'bar': False, 'baz': True}),
            (['--bar', '--baz'], {'foo': False, 'bar': True, 'baz': True}),
        ]
        fail_cases = [['--foo', '--bar', '--baz'], ['--foo', '--bar'], ['--foo'], ['--bar'], ['--baz']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
    def test_nested_me_in_me(self):
        """A mutually exclusive group nested in another: at most one of foo/bar/baz overall."""
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as outer:
                with ParamGroup(mutually_exclusive=True) as inner:
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                with ParamGroup(mutually_exclusive=True):
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        success_cases = [
            (['--foo'], {'foo': True, 'bar': False, 'baz': False}),
            (['--bar'], {'foo': False, 'bar': True, 'baz': False}),
            (['--baz'], {'foo': False, 'bar': False, 'baz': True}),
        ]
        fail_cases = [['--foo', '--bar', '--baz'], ['--foo', '--bar'], ['--foo', '--baz'], ['--bar', '--baz']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
    def test_nested_md_in_me(self):
        """A mutually dependent group nested in a mutually exclusive one: either foo+bar
        together, or baz alone."""
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as outer:
                with ParamGroup(mutually_dependent=True) as inner:
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                with ParamGroup(mutually_dependent=True):
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        success_cases = [
            (['--foo', '--bar'], {'foo': True, 'bar': True, 'baz': False}),
            (['--baz'], {'foo': False, 'bar': False, 'baz': True}),
        ]
        fail_cases = [['--foo', '--bar', '--baz'], ['--foo', '--baz'], ['--bar', '--baz']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
    def test_nested_md_in_md(self):
        """A mutually dependent group nested in another: all of foo/bar/baz or none."""
        class Foo1(Command):
            with ParamGroup(mutually_dependent=True) as outer:
                with ParamGroup(mutually_dependent=True) as inner:
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        class Foo2(Command):
            with ParamGroup(mutually_dependent=True):
                with ParamGroup(mutually_dependent=True):
                    foo = Flag()
                    bar = Flag()
                baz = Flag()

        fail_cases = [['--foo', '--bar'], ['--foo', '--baz'], ['--bar', '--baz'], ['--foo'], ['--bar'], ['--baz']]
        for cmd in (Foo1, Foo2):
            with self.subTest(cmd=cmd):
                self.assert_parse_results(cmd, ['--foo', '--bar', '--baz'], {'foo': True, 'bar': True, 'baz': True})
                self.assert_parse_fails_cases(cmd, fail_cases, UsageError)
    def test_nested_me_in_me_with_plain(self):
        """A nested mutually exclusive group plus a plain single-member group inside an
        outer mutually exclusive group: d may combine with exactly one of a/b/c."""
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as outer:
                with ParamGroup(mutually_exclusive=True) as inner:
                    a = Flag('-a')
                    b = Flag('-b')
                    c = Flag('-c')
                with ParamGroup() as plain:
                    d = Flag('-d')

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                with ParamGroup(mutually_exclusive=True):
                    a = Flag('-a')
                    b = Flag('-b')
                    c = Flag('-c')
                with ParamGroup():
                    d = Flag('-d')

        success_cases = [
            (['-a'], {'a': True, 'b': False, 'c': False, 'd': False}),
            (['-b'], {'a': False, 'b': True, 'c': False, 'd': False}),
            (['-c'], {'a': False, 'b': False, 'c': True, 'd': False}),
            (['-ad'], {'a': True, 'b': False, 'c': False, 'd': True}),
            (['-bd'], {'a': False, 'b': True, 'c': False, 'd': True}),
            (['-cd'], {'a': False, 'b': False, 'c': True, 'd': True}),
        ]
        fail_cases = [['-abc'], ['-ab'], ['-ac'], ['-bc'], ['-abcd'], ['-abd'], ['-acd'], ['-bcd']]
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
    def test_non_me_nested_in_me_with_me(self):
        """A plain two-member group nested next to a mutually exclusive group inside an
        outer mutually exclusive group; also verifies group sorting (inner before outer)."""
        class Foo1(Command):
            with ParamGroup(mutually_exclusive=True) as outer:
                with ParamGroup(mutually_exclusive=True) as inner:
                    a = Flag('-a')
                    b = Flag('-b')
                c = Flag('-c')
                with ParamGroup() as plain:
                    d = Flag('-d')
                    e = Flag('-e')

        class Foo2(Command):
            with ParamGroup(mutually_exclusive=True):
                with ParamGroup(mutually_exclusive=True):
                    a = Flag('-a')
                    b = Flag('-b')
                c = Flag('-c')
                with ParamGroup():
                    d = Flag('-d')
                    e = Flag('-e')

        with self.subTest('group sort test'):
            self.assertIn(Foo1.plain, Foo1.outer)
            self.assertIn(Foo1.inner, Foo1.outer)
            self.assertIn(Foo1.c, Foo1.outer)
            # Nested groups sort before the group that contains them.
            expected = [Foo1.inner, Foo1.plain, Foo1.outer]
            self.assertListEqual(expected, get_params(Foo1).groups)
            self.assertListEqual(expected, sorted([Foo1.plain, Foo1.outer, Foo1.inner]))

        success_cases = [
            (['-a'], {'a': True, 'b': False, 'c': False, 'd': False, 'e': False}),
            (['-b'], {'a': False, 'b': True, 'c': False, 'd': False, 'e': False}),
            (['-c'], {'a': False, 'b': False, 'c': True, 'd': False, 'e': False}),
            (['-d'], {'a': False, 'b': False, 'c': False, 'd': True, 'e': False}),
            (['-e'], {'a': False, 'b': False, 'c': False, 'd': False, 'e': True}),
            (['-de'], {'a': False, 'b': False, 'c': False, 'd': True, 'e': True}),
        ]
        # fmt: off
        fail_cases = [
            ['-abc'], ['-ab'], ['-ac'], ['-bc'],
            ['-abcd'], ['-abd'], ['-acd'], ['-bcd'],
            ['-abce'], ['-abe'], ['-ace'], ['-bce'],
            ['-abcde'], ['-abde'], ['-acde'], ['-bcde'],
            ['-ad'], ['-ae'], ['-ade'],
            ['-bd'], ['-be'], ['-bde'],
            ['-cd'], ['-ce'], ['-cde'],
        ]
        # fmt: on
        self.assert_cases_for_cmds(success_cases, fail_cases, Foo1, Foo2, exc=UsageError)
    def test_nested_group_sorting_1(self):
        """Groups sort deepest-first: doubly-nested groups before singly-nested, before outer."""
        class Foo(Command):
            with ParamGroup() as outer:
                with ParamGroup() as inner_1:
                    a = Flag('-a')
                    with ParamGroup() as nested_inner_2:
                        c = Flag('-c')
                with ParamGroup() as inner_2:
                    b = Flag('-b')
                    with ParamGroup() as nested_inner_1:
                        d = Flag('-d')

        expected = [Foo.nested_inner_2, Foo.nested_inner_1, Foo.inner_1, Foo.inner_2, Foo.outer]
        self.assertListEqual(expected, get_params(Foo).groups)
    def test_nested_group_sorting_2(self):
        """Nested groups from different parents still sort before all top-level groups."""
        class Foo(Command):
            with ParamGroup() as outer_1:
                a = Flag('-a')
                with ParamGroup() as inner_2:
                    c = Flag('-c')
            with ParamGroup() as outer_2:
                b = Flag('-b')
                with ParamGroup() as inner_1:
                    d = Flag('-d')

        expected = [Foo.inner_2, Foo.inner_1, Foo.outer_1, Foo.outer_2]
        self.assertListEqual(expected, get_params(Foo).groups)
def test_nested_group_sorting_3(self):
class Foo(Command):
with ParamGroup() as outer_1:
a = Flag('-a')
with ParamGroup() as inner_2:
c = Flag('-c')
with ParamGroup() as inner_3:
d = Flag('-d')
with ParamGroup() as outer_2:
b = Flag('-b')
| |
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
import math
import copy
class TexMemWatcher(DirectObject):
"""
This class creates a separate graphics window that displays an
approximation of the current texture memory, showing the textures
that are resident and/or active, and an approximation of the
amount of texture memory consumed by each one. It's intended as a
useful tool to help determine where texture memory is being spent.
Although it represents the textures visually in a 2-d space, it
doesn't actually have any idea how textures are physically laid
out in memory--but it has to lay them out somehow, so it makes
something up. It occasionally rearranges the texture display when
it feels it needs to, without regard to what the graphics card is
actually doing. This tool can't be used to research texture
memory fragmentation issues.
"""
NextIndex = 1
StatusHeight = 20 # in pixels
    def __init__(self, gsg = None, limit = None):
        """Create the watcher and open its display window.

        :param gsg: the GSG whose texture memory should be watched; may be a
            GraphicsOutput (its GSG is used), or None for the main window's GSG.
        :param limit: intended texture-memory limit in bytes, or None to derive
            it (see setLimit()).
        """
        DirectObject.__init__(self)

        # First, we'll need a name to uniquify the object.
        self.name = 'tex-mem%s' % (TexMemWatcher.NextIndex)
        TexMemWatcher.NextIndex += 1

        self.cleanedUp = False
        self.top = 1.0

        # The textures managed by the TexMemWatcher are packed
        # arbitrarily into the canvas, which is the viewable region
        # that represents texture memory allocation.  The packing
        # arrangement has no relation to actual layout within texture
        # memory (which we have no way to determine).

        # The visual size of each texture is chosen in proportion to
        # the total number of bytes of texture memory the texture
        # consumes.  This includes mipmaps, and accounts for texture
        # compression.  Visually, a texture with mipmaps will be
        # represented by a rectangle 33% larger than an
        # equivalent-sized texture without mipmaps.  Of course, this
        # once again has little bearing to the way the textures are
        # actually arranged in memory; but it serves to give a visual
        # indication of how much texture memory each texture consumes.

        # There is an arbitrary limit, self.limit, which may have been
        # passed to the constructor, or which may be arbitrarily
        # determined.  This represents the intended limit to texture
        # memory utilization.  We (generously) assume that the
        # graphics card will implement a perfect texture packing
        # algorithm, so that as long as our total utilization <=
        # self.limit, it must fit within texture memory.  We represent
        # this visually by aggressively packing textures within the
        # self.limit block so that they are guaranteed to fit, as long
        # as we do not exceed the total utilization.  This may
        # sometimes mean distorting a texture block or even breaking
        # it into multiple pieces to get it to fit, clearly
        # fictionalizing whatever the graphics driver is actually
        # doing.

        # Internally, textures are packed into an integer grid of
        # Q-units.  Q-units are in proportion to texture bytes.
        # Specifically, each Q-unit corresponds to a block of
        # self.quantize * self.quantize texture bytes in the Texture
        # Memory window.  The Q-units are the smallest packable unit;
        # increasing self.quantize therefore reduces the visual
        # packing resolution correspondingly.  Q-units very roughly
        # correspond to pixels onscreen (they may be larger, sometimes
        # considerably larger, than 1 pixel, depending on the window
        # size).

        # This number defines the size of a Q-unit square, in texture
        # bytes.  It is automatically adjusted in repack() based on
        # the window size and the texture memory size.
        self.quantize = 1

        # This is the maximum number of bitmask rows (within
        # self.limit) to allocate for packing.  This controls the
        # value assigned to self.quantize in repack().
        self.maxHeight = base.config.GetInt('tex-mem-max-height', 300)

        # The total number of texture bytes tracked, including overflow.
        self.totalSize = 0

        # The total number of texture bytes placed, not including
        # overflow (that is, within self.limit).
        self.placedSize = 0

        # The total number of Q-units placed, not including overflow.
        self.placedQSize = 0

        # If no GSG is specified, use the main GSG.
        if gsg is None:
            gsg = base.win.getGsg()
        elif isinstance(gsg, GraphicsOutput):
            # If we were passed a window, use that window's GSG.
            gsg = gsg.getGsg()

        self.gsg = gsg

        # Now open a new window just to render the output.
        size = ConfigVariableInt('tex-mem-win-size', '300 300')
        origin = ConfigVariableInt('tex-mem-win-origin', '100 100')
        self.winSize = (size[0], size[1])
        name = 'Texture Memory'
        props = WindowProperties()
        props.setOrigin(origin[0], origin[1])
        props.setSize(*self.winSize)
        props.setTitle(name)
        props.setFullscreen(False)
        props.setUndecorated(False)

        fbprops = FrameBufferProperties.getDefault()
        flags = GraphicsPipe.BFFbPropsOptional | GraphicsPipe.BFRequireWindow

        self.pipe = None

        # Set this to tinydisplay if you're running on a machine with
        # limited texture memory.  That way you won't compete for
        # texture memory with the main scene.
        moduleName = base.config.GetString('tex-mem-pipe', '')
        if moduleName:
            self.pipe = base.makeModulePipe(moduleName)

        # If the requested pipe fails for some reason, we'll use the
        # regular pipe.
        if not self.pipe:
            self.pipe = base.pipe

        self.win = base.graphicsEngine.makeOutput(self.pipe, name, 0, fbprops,
                                                  props, flags)
        assert self.win

        # We should render at the end of the frame.
        self.win.setSort(10000)

        # We don't need to clear the color buffer, since we'll be
        # filling it with a texture.  We also don't need to clear the
        # depth buffer, since we won't be using it.
        self.win.setClearColorActive(False)
        self.win.setClearDepthActive(False)

        eventName = '%s-window' % (self.name)
        self.win.setWindowEvent(eventName)
        self.accept(eventName, self.windowEvent)

        # Listen for this event so we can update appropriately, if
        # anyone changes the window's graphics memory limit,
        self.accept('graphics_memory_limit_changed',
                    self.graphicsMemoryLimitChanged)

        # We'll need a mouse object to get mouse events.
        self.mouse = base.dataRoot.attachNewNode(MouseAndKeyboard(self.win, 0, '%s-mouse' % (self.name)))
        bt = ButtonThrower('%s-thrower' % (self.name))
        self.mouse.attachNewNode(bt)
        bt.setPrefix('button-%s-' % (self.name))
        self.accept('button-%s-mouse1' % (self.name), self.mouseClick)

        self.setupGui()
        self.setupCanvas()

        # Now start handling up the actual stuff in the scene.
        self.background = None
        self.nextTexRecordKey = 0
        self.rollover = None
        self.isolate = None
        self.isolated = None
        self.needsRepack = False

        # How frequently should the texture memory window check for
        # state changes?
        updateInterval = base.config.GetDouble("tex-mem-update-interval", 0.5)
        self.task = taskMgr.doMethodLater(updateInterval, self.updateTextures, 'TexMemWatcher')

        self.setLimit(limit)
    def setupGui(self):
        """ Creates the gui elements and supporting structures. """

        # A private 2-d scene graph for this window, configured like the
        # standard render2d (no depth, two-sided, unsorted bin).
        self.render2d = NodePath('render2d')
        self.render2d.setDepthTest(False)
        self.render2d.setDepthWrite(False)
        self.render2d.setTwoSided(True)
        self.render2d.setBin('unsorted', 0)

        # Create a DisplayRegion and an associated camera.
        dr = self.win.makeDisplayRegion()
        cam = Camera('cam2d')
        self.lens = OrthographicLens()
        self.lens.setNearFar(-1000, 1000)
        self.lens.setFilmSize(2, 2)
        cam.setLens(self.lens)

        np = self.render2d.attachNewNode(cam)
        dr.setCamera(np)

        # aspect2d is a child of render2d; presumably rescaled elsewhere to
        # compensate for the window's aspect ratio -- not visible in this method.
        self.aspect2d = self.render2d.attachNewNode('aspect2d')

        # A flat gray card behind the status text at the bottom of the window.
        cm = CardMaker('statusBackground')
        cm.setColor(0.85, 0.85, 0.85, 1)
        cm.setFrame(0, 2, 0, 2)
        self.statusBackground = self.render2d.attachNewNode(cm.generate(), -1)
        self.statusBackground.setPos(-1, 0, -1)

        # Left-aligned status text and right-aligned size text, both black
        # on the light gray background.
        self.status = self.aspect2d.attachNewNode('status')
        self.statusText = TextNode('statusText')
        self.statusText.setTextColor(0, 0, 0, 1)
        self.statusTextNP = self.status.attachNewNode(self.statusText)
        self.statusTextNP.setScale(1.5)

        self.sizeText = TextNode('sizeText')
        self.sizeText.setTextColor(0, 0, 0, 1)
        self.sizeText.setAlign(TextNode.ARight)
        self.sizeText.setCardAsMargin(0.25, 0, 0, -0.25)
        self.sizeText.setCardColor(0.85, 0.85, 0.85, 1)
        self.sizeTextNP = self.status.attachNewNode(self.sizeText)
        self.sizeTextNP.setScale(1.5)
    def setupCanvas(self):
        """ Creates the "canvas", which is the checkerboard area where
        texture memory is laid out.  The canvas has its own
        DisplayRegion. """

        self.canvasRoot = NodePath('canvasRoot')
        self.canvasRoot.setDepthTest(False)
        self.canvasRoot.setDepthWrite(False)
        self.canvasRoot.setTwoSided(True)
        self.canvasRoot.setBin('unsorted', 0)

        self.canvas = self.canvasRoot.attachNewNode('canvas')

        # Create a DisplayRegion and an associated camera.
        self.canvasDR = self.win.makeDisplayRegion()
        # Render the canvas beneath the GUI display region (sort -10 < default).
        self.canvasDR.setSort(-10)

        cam = Camera('cam2d')
        self.canvasLens = OrthographicLens()
        self.canvasLens.setNearFar(-1000, 1000)
        cam.setLens(self.canvasLens)

        np = self.canvasRoot.attachNewNode(cam)
        self.canvasDR.setCamera(np)

        # Create a MouseWatcher so we can interact with the various
        # textures.
        self.mw = MouseWatcher('%s-watcher' % (self.name))
        self.mw.setDisplayRegion(self.canvasDR)
        mwnp = self.mouse.attachNewNode(self.mw)

        eventName = '%s-enter' % (self.name)
        self.mw.setEnterPattern(eventName)
        self.accept(eventName, self.enterRegion)

        eventName = '%s-leave' % (self.name)
        self.mw.setLeavePattern(eventName)
        self.accept(eventName, self.leaveRegion)

        # Create a checkerboard background card for the canvas: a 2x2
        # grayscale image with alternating dark/light texels, displayed
        # with nearest filtering so the squares stay crisp.
        p = PNMImage(2, 2, 1)
        p.setGray(0, 0, 0.40)
        p.setGray(1, 1, 0.40)
        p.setGray(0, 1, 0.75)
        p.setGray(1, 0, 0.75)

        self.checkTex = Texture('checkTex')
        self.checkTex.load(p)
        self.checkTex.setMagfilter(Texture.FTNearest)

        self.canvasBackground = None

        self.makeCanvasBackground()
    def makeCanvasBackground(self):
        """(Re)builds the checkerboard background cards for the canvas,
        replacing any previous background. """
        if self.canvasBackground:
            self.canvasBackground.removeNode()

        self.canvasBackground = self.canvasRoot.attachNewNode('canvasBackground', -100)

        # The main card covers the unit square (0..1 in both axes).
        cm = CardMaker('background')
        cm.setFrame(0, 1, 0, 1)
        cm.setUvRange((0, 0), (1, 1))
        self.canvasBackground.attachNewNode(cm.generate())

        # A second card covers the band from y=1 up to self.top, tinted red
        # (presumably marking the region beyond the memory limit -- confirm
        # against the code that assigns self.top, not visible here).
        cm.setFrame(0, 1, 1, self.top)
        cm.setUvRange((0, 1), (1, self.top))
        bad = self.canvasBackground.attachNewNode(cm.generate())
        bad.setColor((0.8, 0.2, 0.2, 1))

        self.canvasBackground.setTexture(self.checkTex)
    def setLimit(self, limit = None):
        """ Indicates the texture memory limit.  If limit is None or
        unspecified, the limit is taken from the GSG, if any; or there
        is no limit.

        :param limit: intended texture-memory limit in bytes, or None.
        """
        # Delegate the bookkeeping, then rebuild the window layout to match.
        self.__doSetLimit(limit)
        self.reconfigureWindow()
def __doSetLimit(self, limit):
""" Internal implementation of setLimit(). """
self.limit = limit
self.lruLimit = False
self.dynamicLimit = False
if not limit:
# If no limit | |
# main entry point to start training of a single model for target-dependent sentiment analysis in news
# author: <NAME> <<EMAIL>>
# This file is based on https://github.com/songyouwei/ABSA-PyTorch/blob/master/train.py
# original author: songyouwei <<EMAIL>>
import argparse
import math
import os
import random
import sys
import time
import numpy
import torch
import torch.nn as nn
from jsonlines import jsonlines
from models.fx import FX_BERT
cur_path = os.path.dirname(os.path.realpath(__file__))
par_path = os.path.dirname(cur_path)
sys.path.append(cur_path)
sys.path.append(par_path)
from newstsc.crossentropylosslsr import CrossEntropyLoss_LSR
from newstsc.dataset import FXDataset, RandomOversampler
from newstsc.earlystopping import EarlyStopping
from newstsc.evaluator import Evaluator
from newstsc.fxlogger import get_logger
from newstsc.models.aen import AEN_Base
from newstsc.models.lcf import lcf_bert
from newstsc.models.ram import RAM
from newstsc.models.spc import SPC_Base
from newstsc.models.globalsenti import Global_LCF
from newstsc.plotter_utils import create_save_plotted_confusion_matrix
from newstsc.tokenizers import (
Tokenizer4Bert,
Tokenizer4Distilbert,
Tokenizer4GloVe,
Tokenizer4Roberta,
)
from pytorch_transformers import (
BertModel,
DistilBertModel,
RobertaModel,
PreTrainedModel,
)
from torch.utils.data import DataLoader, random_split, ConcatDataset
logger = get_logger()
class Instructor:
    def __init__(self, opt):
        """Set up the model, label mappings, evaluator, and (in training mode) the datasets.

        :param opt: parsed options namespace; fields used here include model_name,
            snem, focus_mode, and training_mode.
        """
        self.opt = opt
        self.create_model()
        logger.info("initialized pretrained model: {}".format(opt.model_name))

        # Mapping between polarity class names and their integer label values.
        self.polarity_associations = {"positive": 2, "neutral": 1, "negative": 0}
        self.polarity_associations_inv = {2: "positive", 1: "neutral", 0: "negative"}
        self.sorted_expected_label_values = [0, 1, 2]
        self.sorted_expected_label_names = ["negative", "neutral", "positive"]

        self.evaluator = Evaluator(
            self.sorted_expected_label_values, self.polarity_associations, self.opt.snem
        )

        # Dataset slots; populated by load_datasets() depending on crossval mode.
        self.trainset = None
        self.devset = None
        self.testset = None
        self.all_datasets = None
        self.crossvalset = None

        self.dependency_parser = None
        if self.opt.focus_mode:
            # Dependency-parser support is currently disabled (commented out).
            # self.dependency_parser = DependencyParser(
            #     self.opt.focus_mode, self.opt.max_seq_len
            # )
            pass

        if self.opt.training_mode:
            self.load_datasets()

        self._print_args()
    def _load_dataset(self, path):
        """Load a single FXDataset from ``path`` using this instructor's tokenizer,
        label mappings, and dataset-related options.

        :param path: path to a .jsonl dataset file
        :return: the constructed FXDataset
        """
        return FXDataset(
            path,
            self.tokenizer,
            self.polarity_associations,
            self.sorted_expected_label_names,
            self.opt.use_tp_placeholders,
            self.opt.task_format,
            self.opt.devmode,
            self.opt.use_global_context,
            self.dependency_parser,
        )
    def load_datasets(self):
        """Load the dataset splits from self.opt.dataset_path.

        In cross-validation mode (crossval > 0) loads crossval.jsonl and test.jsonl;
        otherwise loads train/dev/test .jsonl files.  Afterwards logs tokenizer
        truncation statistics accumulated while loading.
        """
        if self.opt.crossval > 0:
            logger.info(
                "loading datasets {} from {}".format(
                    self.opt.dataset_name, self.opt.dataset_path
                )
            )
            self.crossvalset = self._load_dataset(
                self.opt.dataset_path + "crossval.jsonl"
            )
            self.testset = self._load_dataset(self.opt.dataset_path + "test.jsonl")
            self.all_datasets = [self.crossvalset, self.testset]
            logger.info(
                "loaded crossval datasets from {}".format(self.opt.dataset_path)
            )
        else:
            logger.info(
                "loading datasets {} from {}".format(
                    self.opt.dataset_name, self.opt.dataset_path
                )
            )
            self.trainset = self._load_dataset(self.opt.dataset_path + "train.jsonl")
            self.devset = self._load_dataset(self.opt.dataset_path + "dev.jsonl")
            self.testset = self._load_dataset(self.opt.dataset_path + "test.jsonl")
            self.all_datasets = [self.trainset, self.devset, self.testset]
            logger.info("loaded datasets from {}".format(self.opt.dataset_path))

        # Report how often the tokenizer had to truncate input sequences.
        logger.info(
            "truncated sequences of in total: {} / {}".format(
                self.tokenizer.count_truncated,
                self.tokenizer.count_all_sequences_where_we_count_truncation,
            )
        )
        logger.info(
            "truncated long docs: {}".format(self.tokenizer.count_truncated_long_docs)
        )
def _print_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.shape))
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
logger.info(
"n_trainable_params: {0}, n_nontrainable_params: {1}".format(
n_trainable_params, n_nontrainable_params
)
)
logger.info("> training arguments:")
for arg in vars(self.opt):
logger.info(">>> {0}: {1}".format(arg, getattr(self.opt, arg)))
def create_model(self, only_model=False):
logger.info("creating model {}".format(self.opt.model_name))
if self.opt.model_name in [
"aen_bert",
"aen_distilbert",
"aen_roberta",
"aen_distilroberta",
"spc_distilbert",
"spc_bert",
"spc_roberta",
"lcf_bert",
"fx_bert",
]:
if not only_model:
if self.opt.model_name in [
"aen_bert",
"spc_bert",
"lcf_bert",
"fx_bert",
]:
self.tokenizer = Tokenizer4Bert(
self.opt.pretrained_model_name,
self.opt.max_seq_len,
self.opt.global_context_seqs_per_doc,
)
elif self.opt.model_name in ["aen_distilbert", "spc_distilbert"]:
self.tokenizer = Tokenizer4Distilbert(
self.opt.pretrained_model_name, self.opt.max_seq_len,
)
elif self.opt.model_name in ["aen_roberta", "spc_roberta"]:
self.tokenizer = Tokenizer4Roberta(
self.opt.pretrained_model_name, self.opt.max_seq_len,
)
elif self.opt.model_name in ["aen_distilroberta", "spc_distiloberta"]:
self.tokenizer = Tokenizer4Roberta(
self.opt.pretrained_model_name, self.opt.max_seq_len,
)
if not os.path.isdir(self.opt.pretrained_model_name):
pretrained_model = torch.hub.load('huggingface/transformers', 'model', self.opt.pretrained_model_name)
elif self.opt.model_name in ["aen_bert", "spc_bert", "lcf_bert", "fx_bert"]:
pretrained_model = BertModel.from_pretrained(
self.opt.pretrained_model_name, output_hidden_states=True
)
elif self.opt.model_name in ["aen_distilbert", "spc_distilbert"]:
pretrained_model = DistilBertModel.from_pretrained(
self.opt.pretrained_model_name, output_hidden_states=True
)
elif self.opt.model_name in ["aen_roberta", "spc_roberta"]:
pretrained_model = RobertaModel.from_pretrained(
self.opt.pretrained_model_name, output_hidden_states=True
)
if self.opt.state_dict == "pretrained":
try:
self.model = self.opt.model_class(
pretrained_model,
self.opt,
pretrained=self.opt.state_dict == "pretrained",
map_location=self.opt.device
).to(self.opt.device)
except TypeError as e:
logger.error("The selected model does not support the 'pretrained'-keyword for state_dict")
exit(1)
else:
self.model = self.opt.model_class(
pretrained_model,
self.opt
).to(self.opt.device)
if self.opt.state_dict and self.opt.state_dict != "pretrained":
# load weights from the state_dict
logger.info(f"loading weights from {self.opt.state_dict}")
self.model.load_state_dict(
torch.load(self.opt.state_dict, map_location=self.opt.device)
)
elif self.opt.model_name in ["aen_glove", "ram"]:
if not only_model:
self.tokenizer = Tokenizer4GloVe(self.opt.max_seq_len)
if self.opt.model_name == "aen_glove":
self.model = self.opt.model_class(
self.tokenizer.embedding_matrix, self.opt
).to(self.opt.device)
elif self.opt.model_name == "ram":
self.model = self.opt.model_class(self.opt).to(self.opt.device)
else:
raise Exception("model_name unknown: {}".format(self.opt.model_name))
def _reset_params(self):
for child in self.model.children():
if not issubclass(
child.__class__, PreTrainedModel
): # if type(child) != BertModel: # skip bert params
for p in child.parameters():
if p.requires_grad:
if len(p.shape) > 1:
self.opt.initializer(p)
else:
stdv = 1.0 / math.sqrt(p.shape[0])
torch.nn.init.uniform_(p, a=-stdv, b=stdv)
def _create_prepare_model_path(self, snem, epoch, fold_number=None):
selected_model_filename = "{0}_{1}_val_{2}_{3}_epoch{4}".format(
self.opt.model_name,
self.opt.dataset_name,
self.opt.snem,
round(snem, 4),
epoch,
)
if fold_number is not None:
selected_model_filename += "_cvf" + str(fold_number)
pathdir = os.path.join(self.opt.experiment_path, "state_dict")
os.makedirs(pathdir, exist_ok=True)
selected_model_path = os.path.join(pathdir, selected_model_filename)
return selected_model_filename, selected_model_path
def _select_inputs(self, sample_batched):
"""
Selects the input data fields, thereby handles other options that influence data selection, too, e.g.,
use_global_context.
"""
inputs = [
sample_batched[col].to(self.opt.device) for col in self.opt.input_columns
]
if self.opt.use_global_context:
str_gci = "global_context_ids{}"
str_gct = "global_context_type_ids{}"
str_gca = "global_context_attention_mask{}"
for i in range(self.opt.global_context_seqs_per_doc):
gci = sample_batched[str_gci.format(i)]
gct = sample_batched[str_gct.format(i)]
gca = sample_batched[str_gca.format(i)]
inputs.append(gci.to(self.opt.device))
inputs.append(gct.to(self.opt.device))
inputs.append(gca.to(self.opt.device))
return inputs
def _train(
self, criterion, optimizer, train_data_loader, dev_data_loader, fold_number=None
):
global_step = 0
selected_model_path = None
selected_model_filename = None
selected_model_dev_stats = None
# initialize the early_stopping object
early_stopping = EarlyStopping()
for epoch in range(self.opt.num_epoch):
logger.info(">" * 100)
logger.info("epoch: {} (num_epoch: {})".format(epoch, self.opt.num_epoch))
n_correct, n_total, loss_total = 0, 0, 0
# switch model to training mode
self.model.train()
# train on batches
for i_batch, sample_batched in enumerate(train_data_loader):
global_step += 1
# clear gradient accumulators
optimizer.zero_grad()
inputs = self._select_inputs(sample_batched)
targets = sample_batched["polarity"].to(self.opt.device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_total += len(outputs)
loss_total += loss.item() * len(outputs)
if global_step % self.opt.log_step == 0:
train_acc = n_correct / n_total
train_loss = loss_total / n_total
logger.info(
"loss: {:.4f}, acc: {:.4f}".format(train_loss, train_acc)
)
dev_stats = self._evaluate(dev_data_loader)
self.evaluator.print_stats(dev_stats, "dev during training")
dev_snem = dev_stats[self.opt.snem]
early_stopping(dev_snem)
if self.opt.eval_only_after_last_epoch:
if epoch >= self.opt.num_epoch - 1:
# return the model that was trained through all epochs as the selected model
logger.info("all epochs finished, saving model to disk...")
selected_model_dev_stats = dev_stats
(
selected_model_filename,
selected_model_path,
) = self._create_prepare_model_path(dev_snem, epoch, fold_number)
torch.save(self.model.state_dict(), selected_model_path)
logger.info(">> saved: {}".format(selected_model_path))
# save confusion matrices
filepath_stats_base = os.path.join(
self.opt.experiment_path, "statistics", selected_model_filename
)
if not filepath_stats_base.endswith("/"):
filepath_stats_base += "/"
os.makedirs(filepath_stats_base, exist_ok=True)
create_save_plotted_confusion_matrix(
dev_stats["confusion_matrix"],
expected_labels=self.sorted_expected_label_values,
basepath=filepath_stats_base,
)
logger.debug(
"created confusion matrices in path: {}".format(
filepath_stats_base
)
)
else:
# return the best model during any epoch
if early_stopping.flag_has_score_increased_since_last_check:
logger.info(
"model yields best performance so far, saving to disk..."
)
selected_model_dev_stats = dev_stats
(
selected_model_filename,
selected_model_path,
) = self._create_prepare_model_path(dev_snem, epoch, fold_number)
torch.save(self.model.state_dict(), selected_model_path)
logger.info(">> saved: {}".format(selected_model_path))
# save confusion matrices
filepath_stats_base = os.path.join(
self.opt.experiment_path, "statistics", selected_model_filename
)
if not filepath_stats_base.endswith("/"):
filepath_stats_base += "/"
os.makedirs(filepath_stats_base, exist_ok=True)
create_save_plotted_confusion_matrix(
dev_stats["confusion_matrix"],
expected_labels=self.sorted_expected_label_values,
basepath=filepath_stats_base,
)
logger.debug(
"created confusion matrices in path: {}".format(
filepath_stats_base
)
)
if early_stopping.early_stop and self.opt.use_early_stopping:
logger.info(
"early stopping after {} epochs without improvement, total epochs: {} of {}".format(
early_stopping.patience, epoch, self.opt.num_epoch
)
)
break
return selected_model_path, selected_model_filename, selected_model_dev_stats
def _evaluate(self, data_loader, get_examples=False, basepath=None):
t_labels_all, t_outputs_all = None, None
t_texts_all, t_targets_all = [], []
# switch model to evaluation mode
self.model.eval()
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(data_loader):
t_inputs = self._select_inputs(t_sample_batched)
t_labels = t_sample_batched["polarity"].to(self.opt.device)
t_texts = t_sample_batched["orig_text"]
t_targets = t_sample_batched["orig_target"]
t_outputs = self.model(t_inputs)
if t_labels_all is None:
t_labels_all = t_labels
t_outputs_all = t_outputs
else:
t_labels_all = torch.cat((t_labels_all, t_labels), dim=0)
t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
t_texts_all.extend(t_texts)
t_targets_all.extend(t_targets)
# softmax: get predictions from outputs
y_pred = torch.argmax(t_outputs_all, -1).cpu()
y_true = t_labels_all.cpu()
stats = self.evaluator.calc_statistics(y_true, y_pred)
if get_examples:
self.evaluator.write_error_table(
y_true,
y_pred,
t_texts_all,
t_targets_all,
basepath + "errortable.jsonl",
)
return stats
def get_normalized_inv_class_frequencies(self):
inv_freqs = []
for label_name in self.sorted_expected_label_names:
inv_freq_of_class = 1.0 / self.testset.label_counter[label_name]
inv_freqs.append(inv_freq_of_class)
sum_of_inv_freqs = sum(inv_freqs)
for i in range(len(inv_freqs)):
inv_freqs[i] = inv_freqs[i] / sum_of_inv_freqs
return inv_freqs
def run_crossval(self):
raise Exception(
"run_crossval needs to get updated as to saving its experiment results, and because the new "
"reset_params does not - as before - load the model newly, but only resets params as in ABSA. "
"also lossweighting has been adapted"
)
# Loss and Optimizer
if self.opt.lossweighting:
inv_class_freqs = self.get_normalized_inv_class_frequencies()
logger.info("weighting losses of classes: {}".format(inv_class_freqs))
class_weights = torch.tensor(inv_class_freqs).to(self.opt.device)
else:
class_weights = None
if self.opt.lsr:
criterion = CrossEntropyLoss_LSR(
self.opt.device, para_LSR=0.2, weight=class_weights
)
else:
criterion = nn.CrossEntropyLoss(weight=class_weights)
_params = filter(lambda p: p.requires_grad, self.model.parameters())
optimizer = self.opt.optimizer(
_params, lr=self.opt.learning_rate, weight_decay=self.opt.l2reg
)
test_data_loader = DataLoader(
dataset=self.testset, batch_size=self.opt.batch_size, shuffle=False
)
valset_len = len(self.crossvalset) // self.opt.crossval
splitedsets = random_split(
self.crossvalset,
tuple(
[valset_len] * (self.opt.crossval - 1)
+ [len(self.crossvalset) - valset_len * (self.opt.crossval - 1)]
),
)
logger.info("starting training...")
all_test_stats = []
for fid in range(self.opt.crossval):
logger.info(">" * 100)
logger.info("fold : {}".format(fid))
trainset = ConcatDataset([x for i, x in enumerate(splitedsets) if i != fid])
valset = splitedsets[fid]
train_data_loader = DataLoader(
dataset=trainset, batch_size=self.opt.batch_size, shuffle=True
)
# (removed scraping artifacts: stray "|" separator and "<gh_stars>1-10" tag)
import torch
from torch.autograd import Function
from collections import namedtuple
from pynvrtc.compiler import Program
from cupy.cuda import function
import numpy as np
from cuda.utils import *
from cuda.bigram_rrnn import *
from cuda.bigram_rrnn_semiring import *
from cuda.unigram_rrnn import *
from cuda.fourgram_rrnn import *
from cuda.fourgram_rrnn_semiring import *
from cuda.threegram_rrnn import *
from cuda.threegram_rrnn_semiring import *
from cuda.twogram_rrnn import *
from cuda.twogram_rrnn_semiring import *
from cuda.onegram_rrnn import *
from cuda.onegram_rrnn_semiring import *
class RRNN_Unigram_Compute_GPU(Function):
    """Old-style autograd Function computing the unigram RRNN recurrence with
    a hand-written CUDA kernel.

    Bug fixed here: the class attributes used by ``compile_functions`` /
    ``get_functions`` (``_RRNN_PTX``, ``_DEVICE2FUNC``) were commented out, so
    any use of this class raised AttributeError. They are restored below;
    unlike the sibling classes, NVRTC compilation is deferred to first use so
    importing this module does not require a CUDA context for this class.
    """

    _RRNN_PROG = None  # pynvrtc Program, created lazily by _ensure_compiled()
    _RRNN_PTX = None  # compiled PTX source, created lazily
    _DEVICE2FUNC = {}  # CUDA device id -> (stream, fwd kernel, bwd kernel)

    def __init__(self, d_out, k, semiring, bidirectional=False):
        """Store launch configuration; bidirectional mode is unsupported."""
        super(RRNN_Unigram_Compute_GPU, self).__init__()
        self.semiring = semiring
        self.d_out = d_out
        self.k = k
        self.bidirectional = bidirectional
        assert not bidirectional

    @classmethod
    def _ensure_compiled(cls):
        # Compile the kernel source once per process, on first GPU use.
        if cls._RRNN_PTX is None:
            cls._RRNN_PROG = Program(
                (UTIL + UNIGRAM_RRNN).encode("utf-8"), "rrnn_prog.cu".encode()
            )
            cls._RRNN_PTX = cls._RRNN_PROG.compile()

    def compile_functions(self):
        """Load the kernels for the current CUDA device and cache them."""
        device = torch.cuda.current_device()
        print("RRNN loaded for gpu {}".format(device))
        self._ensure_compiled()
        mod = function.Module()
        mod.load(bytes(self._RRNN_PTX.encode()))
        # Select entry points; the caching logic is identical for both flavors.
        # NOTE(review): only UNIGRAM_RRNN is compiled for this class (no
        # *_SEMIRING source) — confirm the semiring entry points exist before
        # using semiring.type != 0.
        if self.semiring.type == 0:
            fwd_name, bwd_name = "rrnn_fwd", "rrnn_bwd"
        else:
            fwd_name, bwd_name = "rrnn_semiring_fwd", "rrnn_semiring_bwd"
        fwd_func = mod.get_function(fwd_name)
        bwd_func = mod.get_function(bwd_name)
        Stream = namedtuple("Stream", ["ptr"])
        current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
        self._DEVICE2FUNC[device] = (current_stream, fwd_func, bwd_func)
        return current_stream, fwd_func, bwd_func

    def get_functions(self):
        """Return cached (stream, fwd, bwd) for the current device."""
        res = self._DEVICE2FUNC.get(torch.cuda.current_device(), None)
        return res if res else self.compile_functions()

    def forward(self, u, c_init=None):
        """Launch the forward recurrence kernel.

        :param u: input tensor; last dim must equal ``self.k``, indexed as
            (length, batch, ...) below
        :param c_init: initial state (required; None is asserted out)
        :return: (cs, last_c) — all per-step states and the final state
        """
        bidir = 2 if self.bidirectional else 1
        assert u.size(-1) == self.k
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        ncols = batch * dim * bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols - 1) // thread_per_block + 1
        if c_init is None:
            assert False
        size = (length, batch, bidir, dim)
        cs = u.new(*size)
        stream, fwd_func, _ = self.get_functions()
        # Argument order must match the CUDA kernel signature exactly.
        fwd_func(
            args=[
                u.contiguous().data_ptr(),
                c_init.contiguous().data_ptr(),
                np.int32(length),
                np.int32(batch),
                np.int32(dim),
                np.int32(self.k),
                cs.data_ptr(),
                np.int32(self.semiring.type),
            ],
            block=(thread_per_block, 1, 1),
            grid=(num_block, 1, 1),
            stream=stream,
        )
        self.save_for_backward(u, c_init)
        self.intermediate_cs = cs
        if self.bidirectional:
            last_c = torch.cat((cs[-1, :, 0, :], cs[0, :, 1, :]), dim=1)
        else:
            last_c = cs[-1, ...].view(batch, -1)
        return cs, last_c

    def backward(self, grad_cs, grad_last_c):
        """Launch the backward kernel; returns grads w.r.t. (u, c_init)."""
        bidir = 2 if self.bidirectional else 1
        u, c_init = self.saved_tensors
        cs = self.intermediate_cs
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        ncols = batch * dim * bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols - 1) // thread_per_block + 1
        if c_init is None:
            assert False
        grad_u = u.new(*u.size())
        grad_init_c = u.new(batch, dim * bidir)
        stream, _, bwd_func = self.get_functions()
        # Argument order must match the CUDA kernel signature exactly.
        bwd_func(
            args=[
                u.contiguous().data_ptr(),
                c_init.contiguous().data_ptr(),
                cs.data_ptr(),
                grad_cs.data_ptr(),
                grad_last_c.contiguous().data_ptr(),
                np.int32(length),
                np.int32(batch),
                np.int32(dim),
                np.int32(self.k),
                grad_u.data_ptr(),
                grad_init_c.data_ptr(),
                np.int32(self.semiring.type),
            ],
            block=(thread_per_block, 1, 1),
            grid=(num_block, 1, 1),
            stream=stream,
        )
        return grad_u, grad_init_c
class RRNN_Bigram_Compute_GPU(Function):
    """Old-style autograd Function computing the bigram RRNN recurrence with
    hand-written CUDA kernels (plain and semiring variants).

    ``forward`` launches the fused recurrence kernel over two state tensors
    (c1s, c2s) plus a per-feature epsilon; ``backward`` launches the matching
    gradient kernel. Kernel argument order is positional and must match the
    CUDA source exactly — do not reorder.
    """
    # Kernel source compiled with NVRTC at class-definition time; shared by
    # all instances (importing this module therefore requires pynvrtc).
    _RRNN_PROG = Program((UTIL + BIGRAM_RRNN + BIGRAM_RRNN_SEMIRING).encode("utf-8"), "rrnn_prog.cu".encode())
    _RRNN_PTX = _RRNN_PROG.compile()
    # Cache: CUDA device id -> (stream, fwd kernel, bwd kernel).
    _DEVICE2FUNC = {}
    def __init__(self, d_out, k, semiring, bidirectional=False):
        """Store launch configuration.

        :param d_out: output feature dimension per direction
        :param k: expected size of u's last dimension
        :param semiring: descriptor whose ``.type == 0`` selects the plain
            kernels; any other value selects the semiring kernels
        :param bidirectional: unsupported (asserted False)
        """
        super(RRNN_Bigram_Compute_GPU, self).__init__()
        self.semiring = semiring
        self.d_out = d_out
        self.k = k
        self.bidirectional = bidirectional
        assert not bidirectional
    def compile_functions(self):
        """Load the fwd/bwd kernels for the current device and cache them."""
        device = torch.cuda.current_device()
        print ("RRNN loaded for gpu {}".format(device))
        mod = function.Module()
        mod.load(bytes(self._RRNN_PTX.encode()))
        if self.semiring.type == 0:
            # plain (non-semiring) kernel entry points
            fwd_func = mod.get_function("rrnn_fwd")
            bwd_func = mod.get_function("rrnn_bwd")
            Stream = namedtuple("Stream", ["ptr"])
            current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
            self._DEVICE2FUNC[device] = (
                current_stream, fwd_func, bwd_func,
            )
            return current_stream, fwd_func, bwd_func
        else:
            # semiring kernel entry points
            fwd_func = mod.get_function("rrnn_semiring_fwd")
            bwd_func = mod.get_function("rrnn_semiring_bwd")
            Stream = namedtuple("Stream", ["ptr"])
            current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
            self._DEVICE2FUNC[device] = (
                current_stream, fwd_func, bwd_func
            )
            return current_stream, fwd_func, bwd_func
    def get_functions(self):
        """Return cached (stream, fwd, bwd) for the current device, compiling
        on first use."""
        res = self._DEVICE2FUNC.get(torch.cuda.current_device(), None)
        return res if res else self.compile_functions()
    def forward(self, u, c1_init=None, c2_init=None, eps=None):
        """Launch the forward recurrence kernel.

        :param u: input tensor, indexed as (length, batch, ...); last dim == k
        :param c1_init: initial first state (required; None is asserted out)
        :param c2_init: initial second state
        :param eps: epsilon tensor passed through to the kernel
        :return: (c1s, c2s, last_c1, last_c2)
        """
        bidir = 2 if self.bidirectional else 1
        assert u.size(-1) == self.k
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        # one CUDA thread per output column
        ncols = batch*dim*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1)//thread_per_block+1
        if c1_init is None:
            assert False
        size = (length, batch, bidir, dim)
        c1s = u.new(*size)
        c2s = u.new(*size)
        stream, fwd_func, _ = self.get_functions()
        FUNC = fwd_func
        # Argument order must match the CUDA kernel signature exactly.
        FUNC(args=[
            u.contiguous().data_ptr(),
            eps.contiguous().data_ptr(),
            c1_init.contiguous().data_ptr(),
            c2_init.contiguous().data_ptr(),
            np.int32(length),
            np.int32(batch),
            np.int32(dim),
            np.int32(self.k),
            c1s.data_ptr(),
            c2s.data_ptr(),
            np.int32(self.semiring.type)],
            block = (thread_per_block,1,1), grid = (num_block,1,1),
            stream=stream
        )
        # keep inputs/state for backward (cs tensors are stashed on self
        # because old-style save_for_backward only accepts leaf inputs)
        self.save_for_backward(u, eps, c1_init, c2_init)
        self.intermediate_c1s, self.intermediate_c2s = c1s, c2s
        if self.bidirectional:
            last_c1, last_c2 \
                = torch.cat((c1s[-1,:,0,:], c1s[0,:,1,:]), dim=1), \
                torch.cat((c2s[-1,:,0,:], c2s[0,:,1,:]), dim=1)
        else:
            last_c1 = c1s[-1,...].view(batch, -1)
            last_c2 = c2s[-1,...].view(batch, -1)
        return c1s, c2s, last_c1, last_c2
    def backward(self, grad_c1s, grad_c2s, grad_last_c1, grad_last_c2):
        """Launch the backward kernel.

        :return: gradients w.r.t. forward's inputs (u, c1_init, c2_init, eps)
        """
        bidir = 2 if self.bidirectional else 1
        u, eps, c1_init, c2_init = self.saved_tensors
        c1s, c2s = self.intermediate_c1s, self.intermediate_c2s
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        ncols = batch*dim*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1)//thread_per_block+1
        if c1_init is None:
            assert False
        # init_ = x.new(ncols).zero_() if init is None else init
        # output gradient buffers filled by the kernel
        grad_u = u.new(*u.size())
        grad_eps = eps.new(*eps.size())
        grad_init_c1 = u.new(batch, dim*bidir)
        grad_init_c2 = u.new(batch, dim*bidir)
        stream, _, bwd_func = self.get_functions()
        FUNC = bwd_func
        # Argument order must match the CUDA kernel signature exactly.
        FUNC(args=[
            u.contiguous().data_ptr(),
            eps.contiguous().data_ptr(),
            c1_init.contiguous().data_ptr(),
            c2_init.contiguous().data_ptr(),
            c1s.data_ptr(),
            c2s.data_ptr(),
            grad_c1s.data_ptr(),
            grad_c2s.data_ptr(),
            grad_last_c1.contiguous().data_ptr(),
            grad_last_c2.contiguous().data_ptr(),
            np.int32(length),
            np.int32(batch),
            np.int32(dim),
            np.int32(self.k),
            grad_u.data_ptr(),
            grad_eps.data_ptr(),
            grad_init_c1.data_ptr(),
            grad_init_c2.data_ptr(),
            np.int32(self.semiring.type)],
            block = (thread_per_block,1,1), grid = (num_block,1,1),
            stream=stream
        )
        return grad_u, grad_init_c1, grad_init_c2, grad_eps
class RRNN_1gram_Compute_GPU(Function):
    """Old-style autograd Function for the one-gram RRNN recurrence.

    Near-duplicate of ``RRNN_Unigram_Compute_GPU`` but compiled from the
    ONEGRAM sources (plain + semiring). Kernel argument order is positional
    and must match the CUDA source exactly — do not reorder.
    """
    # NVRTC-compiled kernel source shared by all instances.
    _RRNN_PROG = Program((UTIL + ONEGRAM_RRNN + ONEGRAM_RRNN_SEMIRING).encode("utf-8"), "rrnn_prog.cu".encode())
    _RRNN_PTX = _RRNN_PROG.compile()
    # Cache: CUDA device id -> (stream, fwd kernel, bwd kernel).
    _DEVICE2FUNC = {}
    def __init__(self, d_out, k, semiring, bidirectional=False):
        """Store launch configuration; bidirectional mode is unsupported."""
        super(RRNN_1gram_Compute_GPU, self).__init__()
        self.semiring = semiring
        self.d_out = d_out
        self.k = k
        self.bidirectional = bidirectional
        assert not bidirectional
    def compile_functions(self):
        """Load the fwd/bwd kernels for the current device and cache them."""
        device = torch.cuda.current_device()
        print ("RRNN loaded for gpu {}".format(device))
        mod = function.Module()
        mod.load(bytes(self._RRNN_PTX.encode()))
        if self.semiring.type == 0:
            # plain (non-semiring) kernel entry points
            fwd_func = mod.get_function("rrnn_fwd")
            bwd_func = mod.get_function("rrnn_bwd")
            Stream = namedtuple("Stream", ["ptr"])
            current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
            self._DEVICE2FUNC[device] = (
                current_stream, fwd_func, bwd_func,
            )
            return current_stream, fwd_func, bwd_func
        else:
            # semiring kernel entry points
            fwd_func = mod.get_function("rrnn_semiring_fwd")
            bwd_func = mod.get_function("rrnn_semiring_bwd")
            Stream = namedtuple("Stream", ["ptr"])
            current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
            self._DEVICE2FUNC[device] = (
                current_stream, fwd_func, bwd_func
            )
            return current_stream, fwd_func, bwd_func
    def get_functions(self):
        """Return cached (stream, fwd, bwd) for the current device, compiling
        on first use."""
        res = self._DEVICE2FUNC.get(torch.cuda.current_device(), None)
        return res if res else self.compile_functions()
    def forward(self, u, c1_init=None):
        """Launch the forward recurrence kernel.

        :param u: input tensor, indexed as (length, batch, ...); last dim == k
        :param c1_init: initial state (required; None is asserted out)
        :return: (c1s, last_c1)
        """
        bidir = 2 if self.bidirectional else 1
        assert u.size(-1) == self.k
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        # one CUDA thread per output column
        ncols = batch*dim*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1)//thread_per_block+1
        if c1_init is None:
            assert False
        size = (length, batch, bidir, dim)
        c1s = u.new(*size)
        stream, fwd_func, _ = self.get_functions()
        FUNC = fwd_func
        # Argument order must match the CUDA kernel signature exactly.
        FUNC(args=[
            u.contiguous().data_ptr(),
            c1_init.contiguous().data_ptr(),
            np.int32(length),
            np.int32(batch),
            np.int32(dim),
            np.int32(self.k),
            c1s.data_ptr(),
            np.int32(self.semiring.type)],
            block = (thread_per_block,1,1), grid = (num_block,1,1),
            stream=stream
        )
        # keep inputs/state for backward
        self.save_for_backward(u, c1_init)
        self.intermediate_c1s = c1s
        if self.bidirectional:
            assert False, "bidirectionality isn't implemented yet"
        else:
            last_c1 = c1s[-1,...].view(batch, -1)
        return c1s, last_c1
    def backward(self, grad_c1s, grad_last_c1):
        """Launch the backward kernel; returns grads w.r.t. (u, c1_init)."""
        bidir = 2 if self.bidirectional else 1
        u, c1_init = self.saved_tensors
        c1s = self.intermediate_c1s
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        ncols = batch*dim*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1)//thread_per_block+1
        if c1_init is None:
            assert False
        # init_ = x.new(ncols).zero_() if init is None else init
        # output gradient buffers filled by the kernel
        grad_u = u.new(*u.size())
        grad_init_c1 = u.new(batch, dim*bidir)
        stream, _, bwd_func = self.get_functions()
        FUNC = bwd_func
        # Argument order must match the CUDA kernel signature exactly.
        FUNC(args=[
            u.contiguous().data_ptr(),
            c1_init.contiguous().data_ptr(),
            c1s.data_ptr(),
            grad_c1s.data_ptr(),
            grad_last_c1.contiguous().data_ptr(),
            np.int32(length),
            np.int32(batch),
            np.int32(dim),
            np.int32(self.k),
            grad_u.data_ptr(),
            grad_init_c1.data_ptr(),
            np.int32(self.semiring.type)],
            block = (thread_per_block,1,1), grid = (num_block,1,1),
            stream=stream
        )
        return grad_u, grad_init_c1
class RRNN_2gram_Compute_GPU(Function):
_RRNN_PROG = Program((UTIL + TWOGRAM_RRNN + TWOGRAM_RRNN_SEMIRING).encode("utf-8"), "rrnn_prog.cu".encode())
_RRNN_PTX = _RRNN_PROG.compile()
_DEVICE2FUNC = {}
def __init__(self, d_out, k, semiring, bidirectional=False):
super(RRNN_2gram_Compute_GPU, self).__init__()
self.semiring = semiring
self.d_out = d_out
self.k = k
self.bidirectional = bidirectional
assert not bidirectional
def compile_functions(self):
device = torch.cuda.current_device()
print ("RRNN loaded for gpu {}".format(device))
mod = function.Module()
mod.load(bytes(self._RRNN_PTX.encode()))
if self.semiring.type == 0:
fwd_func = mod.get_function("rrnn_fwd")
bwd_func = mod.get_function("rrnn_bwd")
Stream = namedtuple("Stream", ["ptr"])
current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
self._DEVICE2FUNC[device] = (
current_stream, fwd_func, bwd_func,
)
return current_stream, fwd_func, bwd_func
else:
fwd_func = mod.get_function("rrnn_semiring_fwd")
bwd_func = mod.get_function("rrnn_semiring_bwd")
Stream = namedtuple("Stream", ["ptr"])
current_stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
self._DEVICE2FUNC[device] = (
current_stream, fwd_func, bwd_func
)
return current_stream, fwd_func, bwd_func
def get_functions(self):
res = self._DEVICE2FUNC.get(torch.cuda.current_device(), None)
return res if res else self.compile_functions()
    def forward(self, u, c1_init=None, c2_init=None):
        """Launch the two-gram forward recurrence kernel.

        :param u: input tensor, indexed as (length, batch, ...); last dim == k
        :param c1_init: initial first state (required; None is asserted out)
        :param c2_init: initial second state
        :return: (c1s, c2s, last_c1, last_c2) — per-step states and final rows
        """
        bidir = 2 if self.bidirectional else 1
        assert u.size(-1) == self.k
        length, batch = u.size(0), u.size(1)
        dim = self.d_out
        # one CUDA thread per output column
        ncols = batch*dim*bidir
        thread_per_block = min(512, ncols)
        num_block = (ncols-1)//thread_per_block+1
        if c1_init is None:
            assert False
        size = (length, batch, bidir, dim)
        c1s = u.new(*size)
        c2s = u.new(*size)
        stream, fwd_func, _ = self.get_functions()
        FUNC = fwd_func
        # Argument order must match the CUDA kernel signature exactly.
        FUNC(args=[
            u.contiguous().data_ptr(),
            c1_init.contiguous().data_ptr(),
            c2_init.contiguous().data_ptr(),
            np.int32(length),
            np.int32(batch),
            np.int32(dim),
            np.int32(self.k),
            c1s.data_ptr(),
            c2s.data_ptr(),
            np.int32(self.semiring.type)],
            block = (thread_per_block,1,1), grid = (num_block,1,1),
            stream=stream
        )
        # keep inputs/state for backward
        self.save_for_backward(u, c1_init, c2_init)
        self.intermediate_c1s, self.intermediate_c2s = c1s, c2s
        if self.bidirectional:
            assert False, "bidirectionality isn't implemented yet"
        else:
            last_c1 = c1s[-1,...].view(batch, -1)
            last_c2 = c2s[-1,...].view(batch, -1)
        return c1s, c2s, last_c1, last_c2
def backward(self, grad_c1s, grad_c2s, grad_last_c1, grad_last_c2):
bidir = 2 if self.bidirectional else 1
u, c1_init, c2_init = self.saved_tensors
c1s, c2s = self.intermediate_c1s, self.intermediate_c2s
length, batch = u.size(0), u.size(1)
dim = self.d_out
ncols = batch*dim*bidir
thread_per_block = min(512, ncols)
num_block = (ncols-1)//thread_per_block+1
if c1_init is None:
assert False
# | |
= {}
body_params = None
if 'entity_neighbors_filter' in local_var_params:
body_params = local_var_params['entity_neighbors_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/data/set/{entitySetId}/neighbors', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_entity(self, entity_set_id, entity_key_id, type, **kwargs): # noqa: E501
"""Deletes a single entity from an entity set. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity(entity_set_id, entity_key_id, type, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param entity_key_id: (required)
:type entity_key_id: str
:param type: (required)
:type type: str
:param block:
:type block: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_entity_with_http_info(entity_set_id, entity_key_id, type, **kwargs) # noqa: E501
def delete_entity_with_http_info(self, entity_set_id, entity_key_id, type, **kwargs): # noqa: E501
"""Deletes a single entity from an entity set. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_with_http_info(entity_set_id, entity_key_id, type, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param entity_key_id: (required)
:type entity_key_id: str
:param type: (required)
:type type: str
:param block:
:type block: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_set_id',
'entity_key_id',
'type',
'block'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_entity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_id' is set
if self.api_client.client_side_validation and ('entity_set_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_id` when calling `delete_entity`") # noqa: E501
# verify the required parameter 'entity_key_id' is set
if self.api_client.client_side_validation and ('entity_key_id' not in local_var_params or # noqa: E501
local_var_params['entity_key_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_key_id` when calling `delete_entity`") # noqa: E501
# verify the required parameter 'type' is set
if self.api_client.client_side_validation and ('type' not in local_var_params or # noqa: E501
local_var_params['type'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `type` when calling `delete_entity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_id' in local_var_params:
path_params['entitySetId'] = local_var_params['entity_set_id'] # noqa: E501
if 'entity_key_id' in local_var_params:
path_params['entityKeyId'] = local_var_params['entity_key_id'] # noqa: E501
query_params = []
if 'type' in local_var_params and local_var_params['type'] is not None: # noqa: E501
query_params.append(('type', local_var_params['type'])) # noqa: E501
if 'block' in local_var_params and local_var_params['block'] is not None: # noqa: E501
query_params.append(('block', local_var_params['block'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/data/set/{entitySetId}/{entityKeyId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_entity_properties(self, entity_set_id, entity_key_id, type, request_body, **kwargs): # noqa: E501
"""Deletes properties from an entity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_properties(entity_set_id, entity_key_id, type, request_body, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param entity_key_id: (required)
:type entity_key_id: str
:param type: (required)
:type type: str
:param request_body: (required)
:type request_body: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_entity_properties_with_http_info(entity_set_id, entity_key_id, type, request_body, **kwargs) # noqa: E501
def delete_entity_properties_with_http_info(self, entity_set_id, entity_key_id, type, request_body, **kwargs): # noqa: E501
"""Deletes properties from an entity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_properties_with_http_info(entity_set_id, entity_key_id, type, request_body, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param entity_key_id: (required)
:type entity_key_id: str
:param type: (required)
:type type: str
:param request_body: (required)
:type request_body: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'entity_set_id',
'entity_key_id',
'type',
'request_body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_entity_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'entity_set_id' is set
if self.api_client.client_side_validation and ('entity_set_id' not in local_var_params or # noqa: E501
local_var_params['entity_set_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_id` when calling `delete_entity_properties`") # noqa: E501
# verify the required parameter 'entity_key_id' is set
if self.api_client.client_side_validation and ('entity_key_id' not in local_var_params or # noqa: E501
local_var_params['entity_key_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_key_id` when calling `delete_entity_properties`") # noqa: E501
# verify the required parameter 'type' is set
if self.api_client.client_side_validation and ('type' not in local_var_params or # noqa: E501
local_var_params['type'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `type` when calling `delete_entity_properties`") # noqa: E501
# verify the required parameter 'request_body' is set
if self.api_client.client_side_validation and ('request_body' not in local_var_params or # noqa: E501
local_var_params['request_body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request_body` when calling `delete_entity_properties`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_id' in local_var_params:
path_params['entitySetId'] = local_var_params['entity_set_id'] # noqa: E501
if 'entity_key_id' in local_var_params:
path_params['entityKeyId'] = local_var_params['entity_key_id'] # noqa: E501
query_params = | |
= wx.StaticBoxSizer(self.inputMaxDigitsStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputMaxDigitsPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputMaxDigits = wx.StaticText(self.scollPanel, -1, "18、实数总位数(max_digits)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputMaxDigits = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputMaxDigits = wx.StaticText(self.scollPanel, -1, "【实数总位数(max_digits)】** 整数位数和小数位数的总和,不包括小数点。")
self.inputMaxDigitsPanel.Add(self.labelInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)
self.inputMaxDigitsPanel.Add(self.inputMaxDigits, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)
# 小数总位数(decimal_places)(默认为0)
self.inputDecimalPlacesStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputDecimalPlacesPanel = wx.StaticBoxSizer(self.inputDecimalPlacesStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputDecimalPlacesPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, "19、小数总位数(decimal_places)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputDecimalPlaces = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, "【小数总位数(decimal_places)】** 小数位数的总和,不包括小数点。")
self.inputDecimalPlacesPanel.Add(self.labelInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)
self.inputDecimalPlacesPanel.Add(self.inputDecimalPlaces, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)
# save调用更新日期(auto_now)
self.radiosAutoNowStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosAutoNowPanel = wx.StaticBoxSizer(self.radiosAutoNowStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosAutoNowPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosAutoNow = wx.StaticText(self.scollPanel, -1, "20、保存更新日期(auto_now):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosAutoNow = wx.RadioBox(self.scollPanel, -1, "", choices=['启用', '不启用'])
self.readmeRadiosAutoNow = wx.StaticText(self.scollPanel, -1, "【保存更新日期(auto_now)】** 仅在调用模型控制器的save()方法时自动更新该日期字段。")
self.radiosAutoNowPanel.Add(self.labelRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
self.radiosAutoNowPanel.Add(self.radiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
# 仅创建时一次赋值日期(auto_now_add)
self.radiosAutoNowAddStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosAutoNowAddPanel = wx.StaticBoxSizer(self.radiosAutoNowAddStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosAutoNowAddPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, "21、仅创建时赋值日期(auto_now_add):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosAutoNowAdd = wx.RadioBox(self.scollPanel, -1, "", choices=['启用', '不启用'])
self.readmeRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, "【创建赋值日期(auto_now_add)】** 仅在创建记录时一次赋值该日期,赋值后不允许修改。")
self.radiosAutoNowAddPanel.Add(self.labelRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
self.radiosAutoNowAddPanel.Add(self.radiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
# 文件上传路径(upload_to)
self.inputUploadToStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputUploadToPanel = wx.StaticBoxSizer(self.inputUploadToStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputUploadToPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputUploadTo = wx.StaticText(self.scollPanel, -1, "22、文件上传路径(upload_to)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputUploadTo = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputUploadTo = wx.StaticText(self.scollPanel, -1, "【文件上传路径(upload_to)】** 指定文件上传路径。")
self.inputUploadToPanel.Add(self.labelInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)
self.inputUploadToPanel.Add(self.inputUploadTo, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)
# 关联关系--模型下拉列表选择(多对一的一)
self.choiceSelectModelStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choiceSelectModelPanel = wx.StaticBoxSizer(self.choiceSelectModelStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choiceSelectModelPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceSelectModel = wx.StaticText(self.scollPanel, -1, "A、关联关系模型【外键关联模型】", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
# self.choiceSelectModel = wx.Choice(self.scollPanel, -1, choices = [' ']+['self'])
self.choiceSelectModel = wx.TextCtrl(self.scollPanel, -1)
self.readmeChoiceSelectModel = wx.StaticText(self.scollPanel, -1, " ** 多对一的一、一对一的一、多对多的多。如:Person、'Person'、'other_app.Person'。")
self.choiceSelectModelPanel.Add(self.labelChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)
self.choiceSelectModelPanel.Add(self.choiceSelectModel, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)
# 删除规则【on_delete】
self.choiceSelectDelRuleStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choiceSelectDelRulePanel = wx.StaticBoxSizer(self.choiceSelectDelRuleStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choiceSelectDelRulePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, "B、删除规则(on_delete)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.choiceSelectDelRule = wx.Choice(self.scollPanel, -1, choices = [' ']+['models.CASCADE','models.SET_NULL','models.PROTECT','models.SET_DEFAULT','models.DO_NOTHING',])
self.readmeChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, " ** 默认级联删除。")
self.choiceSelectDelRulePanel.Add(self.labelChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)
self.choiceSelectDelRulePanel.Add(self.choiceSelectDelRule, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)
# 备注名【verbose_name】
self.inputRelationRemarkStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelationRemarkPanel = wx.StaticBoxSizer(self.inputRelationRemarkStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelationRemarkPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelationRemark = wx.StaticText(self.scollPanel, -1, "C、关联字段备注名(verbose_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelationRemark = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelationRemark = wx.StaticText(self.scollPanel, -1, " ** 后台显示的关联字段的可读名称。")
self.inputRelationRemarkPanel.Add(self.labelInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelationRemarkPanel.Add(self.inputRelationRemark, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)
# 筛选关联字段【limit_choices_to】
self.inputLimitChoicesToStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputLimitChoicesToPanel = wx.StaticBoxSizer(self.inputLimitChoicesToStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputLimitChoicesToPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, "D、筛选关联字段【limit_choices_to】", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputLimitChoicesTo = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, " ** 如:{'is_staff': True}。也可为一个Q对象,或可回调函数返回字典/Q。")
self.inputLimitChoicesToPanel.Add(self.labelInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)
self.inputLimitChoicesToPanel.Add(self.inputLimitChoicesTo, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)
# 反向名称(related_name)
self.inputRelatedNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelatedNamePanel = wx.StaticBoxSizer(self.inputRelatedNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelatedNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelatedName = wx.StaticText(self.scollPanel, -1, "E、反向名称(related_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelatedName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelatedName = wx.StaticText(self.scollPanel, -1, " ** 被关联模型对象找到本模型对象的名称。赋值'+'关闭反向查找功能。抽象类必需。")
self.inputRelatedNamePanel.Add(self.labelInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelatedNamePanel.Add(self.inputRelatedName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)
# 反向过滤器名称(related_query_name)
self.inputRelatedQueryNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelatedQueryNamePanel = wx.StaticBoxSizer(self.inputRelatedQueryNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelatedQueryNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, "F、反向过滤器名称(related_query_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelatedQueryName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, " ** 默认取related_name的值。用于:tag__name='important'之类的反向过滤前缀。")
self.inputRelatedQueryNamePanel.Add(self.labelInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelatedQueryNamePanel.Add(self.inputRelatedQueryName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)
# 指定关联外键(to_field)
self.inputToFieldStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputToFieldPanel = wx.StaticBoxSizer(self.inputToFieldStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputToFieldPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputToField = wx.StaticText(self.scollPanel, -1, "G、指定关联外键(to_field)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputToField = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputToField = wx.StaticText(self.scollPanel, -1, " ** 默认取primary_key=True的字段。若要改变,必须是设置unique=True的字段。")
self.inputToFieldPanel.Add(self.labelInputToField, 0, wx.EXPAND | wx.ALL, 2)
self.inputToFieldPanel.Add(self.inputToField, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputToField, 0, wx.EXPAND | wx.ALL, 2)
# 外键约束(db_constraint)
self.radiosDBConstraintStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosDBConstraintPanel = wx.StaticBoxSizer(self.radiosDBConstraintStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosDBConstraintPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, "H、外键约束(db_constraint)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosDBConstraint = wx.RadioBox(self.scollPanel, -1, "", choices=['开启', '关闭'])
self.readmeRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, " ** 当有无效冗余数据或为共享数据库时可关闭,否则不建议关闭。")
self.radiosDBConstraintPanel.Add(self.labelRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
self.radiosDBConstraintPanel.Add(self.radiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
# 多对多中间表名(db_table)
self.inputDBTableStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputDBTablePanel = wx.StaticBoxSizer(self.inputDBTableStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputDBTablePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputDBTable = wx.StaticText(self.scollPanel, -1, "I、多对多中间表名(db_table)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputDBTable = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputDBTable = wx.StaticText(self.scollPanel, -1, " ** Django默认生成关联表的哈希值表名,保证值唯一。也可自己命名。")
self.inputDBTablePanel.Add(self.labelInputDBTable, 0, wx.EXPAND | wx.ALL, 2)
self.inputDBTablePanel.Add(self.inputDBTable, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputDBTable, 0, wx.EXPAND | wx.ALL, 2)
# all 交换类型(swappable)
# 多对多 指定多对多模型(through)
# 多对多 指定多对多模型外键(through_fields)
# 一对一 父类链接(parent_link)
# 暂时不开放上述参数
# 后触发按钮
self.afterBtns.extend([
self.btnResetInput, self.btnAddFieldToArea,
# self.btnExecSave,
])
# 所有的参数
self.allArgs.extend([
self.choiceFieldType, # 字段类型选择放这里不合理【暂时不调整】
self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,
self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary, # 英文拼错了,不改了
self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,
self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,
self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,
self.inputMaxLength, self.inputMaxDigits, self.inputDecimalPlaces,
self.radiosAutoNow, self.radiosAutoNowAdd, self.inputUploadTo,
self.choiceSelectModel, self.choiceSelectDelRule, self.inputRelationRemark,
self.inputLimitChoicesTo, self.inputRelatedName, self.inputRelatedQueryName,
self.inputToField, self.radiosDBConstraint, self.inputDBTable,
])
# 共用参数
self.commonArgs.extend([
self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,
self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary,
self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,
self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,
self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,
])
# 私有参数
self.specialArgs.extend([
# 一行表示一组私有参数
self.inputMaxLengthStaticBox, self.inputMaxLength, self.labelInputMaxLength, self.readmeInputMaxLength,
self.inputMaxDigitsStaticBox, self.inputMaxDigits, self.labelInputMaxDigits, self.readmeInputMaxDigits,
self.inputDecimalPlacesStaticBox, self.inputDecimalPlaces, self.labelInputDecimalPlaces, self.readmeInputDecimalPlaces,
self.radiosAutoNowStaticBox, self.radiosAutoNow, self.labelRadiosAutoNow, self.readmeRadiosAutoNow,
self.radiosAutoNowAddStaticBox, self.radiosAutoNowAdd, self.labelRadiosAutoNowAdd, self.readmeRadiosAutoNowAdd,
self.inputUploadToStaticBox, self.inputUploadTo, self.labelInputUploadTo, self.readmeInputUploadTo,
# 关联字段
self.choiceSelectModelStaticBox, self.choiceSelectModel, self.labelChoiceSelectModel, self.readmeChoiceSelectModel,
self.choiceSelectDelRuleStaticBox, self.choiceSelectDelRule, self.labelChoiceSelectDelRule, self.readmeChoiceSelectDelRule,
self.inputRelationRemarkStaticBox, self.inputRelationRemark, self.labelInputRelationRemark, self.readmeInputRelationRemark,
self.inputLimitChoicesToStaticBox, self.inputLimitChoicesTo, self.labelInputLimitChoicesTo, self.readmeInputLimitChoicesTo,
self.inputRelatedNameStaticBox, self.inputRelatedName, self.labelInputRelatedName, self.readmeInputRelatedName,
self.inputRelatedQueryNameStaticBox, self.inputRelatedQueryName, self.labelInputRelatedQueryName, self.readmeInputRelatedQueryName,
self.inputToFieldStaticBox, self.inputToField, self.labelInputToField, self.readmeInputToField,
self.radiosDBConstraintStaticBox, self.radiosDBConstraint, self.labelRadiosDBConstraint, self.readmeRadiosDBConstraint,
self.inputDBTableStaticBox, self.inputDBTable, self.labelInputDBTable, self.readmeInputDBTable,
])
# 字体初始化控件录入
self.readmeStaticTexts.extend([
self.readmeChoiceFieldType,self.readmeInputFieldModelName,
self.readmeInputFieldDatabaseName,self.readmeInputFieldRemarkName,
self.readmeRadiosFiledBlank,self.readmeRadiosFiledNull,
self.readmeRadiosFiledPrimary,self.readmeRadiosFiledUnique,
self.readmeRadiosFiledDbIndex,self.readmeRadiosFiledEditable,
self.readmeInputMaxLength,self.readmeRadiosAutoNow,
self.readmeRadiosAutoNowAdd,self.readmeInputDefaultValue,
self.readmeInputFormHelpText,self.readmeInputFormErrorMessage,
self.readmeInputUploadTo,self.readmeInputMaxDigits,
self.readmeInputDecimalPlaces,self.readmeChoicesFiledUniqueForDate,
self.readmeChoicesFiledUniqueForMonth,self.readmeChoicesFiledUniqueForYear,
self.readmeChoiceSelectModel,self.readmeChoiceSelectDelRule,
self.readmeInputRelationRemark,self.readmeInputLimitChoicesTo,
self.readmeInputRelatedName,self.readmeInputRelatedQueryName,
self.readmeInputToField,self.readmeRadiosDBConstraint,
self.readmeInputDBTable,
])
self.labelStaticTexts.extend([
self.choiceFieldTypeLabel,self.labelFieldModelName,
self.labelFieldDatabaseName,self.labelFieldRemarkName,
self.labelRadiosFiledBlank,self.labelRadiosFiledNull,
self.labelRadiosFiledPrimary,self.labelRadiosFiledUnique,
self.labelRadiosFiledDbIndex,self.labelRadiosFiledEditable,
self.labelInputMaxLength,self.labelRadiosAutoNow,
self.labelRadiosAutoNowAdd,self.labelInputDefaultValue,
self.labelInputFormHelpText,self.labelInputFormErrorMessage,
self.labelInputUploadTo,self.labelInputMaxDigits,
self.labelInputDecimalPlaces,self.labelChoicesFiledUniqueForDate,
self.labelChoicesFiledUniqueForMonth,self.labelChoicesFiledUniqueForYear,
self.labelChoiceSelectModel,self.labelChoiceSelectDelRule,
self.labelInputRelationRemark,self.labelInputLimitChoicesTo,
self.labelInputRelatedName,self.labelInputRelatedQueryName,
self.labelInputToField,self.labelRadiosDBConstraint,
self.labelInputDBTable,
])
# 按钮点击事件
self.Bind(wx.EVT_BUTTON, self.onExit, self.btnExit)
self.Bind(wx.EVT_BUTTON, self.onBtnAddNew, self.btnAddNew)
self.Bind(wx.EVT_BUTTON, self.onBtnResetInput, self.btnResetInput)
self.Bind(wx.EVT_BUTTON, self.onBtnAddFieldToArea, self.btnAddFieldToArea)
self.Bind(wx.EVT_BUTTON, self.onBtnExecSave, self.btnExecSave)
self.Bind(wx.EVT_BUTTON, self.onBtnPreview, self.btnPreview)
# 下拉框选择事件
self.Bind(wx.EVT_CHOICE, self.onChoiceFieldType, self.choiceFieldType)
self.Bind(wx.EVT_CHOICE, self.onChoiceSelectDelRule, self.choiceSelectDelRule)
# 文本实时监听事件
self.Bind(wx.EVT_TEXT, self.onInputFieldModelName, self.inputFieldModelName)
self.Bind(wx.EVT_TEXT, self.onInputMaxLength, self.inputMaxLength)
self.Bind(wx.EVT_TEXT, self.onInputMaxDigits, self.inputMaxDigits)
self.Bind(wx.EVT_TEXT, self.onInputDecimalPlaces, self.inputDecimalPlaces)
self.Bind(wx.EVT_TEXT, self.onInputRelatedName, self.inputRelatedName)
# 单选框事件
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledBlank)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledNull)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledPrimary)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledUnique)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledDbIndex)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledEditable)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNow)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNowAdd)
def _init_Meta_panel(self):
"""初始化Meta选项面板"""
# 显示和隐藏Meta按钮,用于空间的合理布局
self.btnShowUnshowMeta = buttons.GenButton(self.panel, -1, '【显示】Meta元数据(表级参数设置)')
self.panelSizer.Add(self.btnShowUnshowMeta, 0, wx.EXPAND | wx.ALL, 2)
self.btnShowUnshowMeta.SetBackgroundColour(CON_COLOR_MAIN)
self.btnShowUnshowMeta.SetForegroundColour(CON_COLOR_WHITE)
self.metaScollPanel = scrolledpanel.ScrolledPanel(self.panel, -1, size=(730,444))
self.metaScollPanel.SetupScrolling()
metaScollPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.metaScollPanel.SetSizer(metaScollPanelSizer)
self.panelSizer.Add(self.metaScollPanel, 0, wx.EXPAND | wx.ALL, 2)
# Meta的各种选项
# 抽象类(abstract)
self.metaAbstractOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaAbstractOptionPanel = wx.StaticBoxSizer(self.metaAbstractOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaAbstractOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, "1、抽象类(abstract):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaAbstractOption = wx.RadioBox(self.metaScollPanel, -1, "", choices=['是', '否'])
self.readmeMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, " ** 该模型声明为抽象模型后,不会在数据库中建表。")
self.metaAbstractOptionPanel.Add(self.labelMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaAbstractOptionPanel.Add(self.metaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型归属应用程序(app_label)
# 可以用model._meta.label或model._meta.label_lower获取模型名称
self.metaAppLabelOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaAppLabelOptionPanel = wx.StaticBoxSizer(self.metaAppLabelOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaAppLabelOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, "2、模型归属应用程序(app_label):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaAppLabelOption = wx.Choice(self.metaScollPanel, -1, choices=[' ',]+get_configs(CONFIG_PATH)['app_names'])
self.readmeMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, " ** 不指定,则默认归属于当前模型文件所在的应用程序。")
self.metaAppLabelOptionPanel.Add(self.labelMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaAppLabelOptionPanel.Add(self.metaAppLabelOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型管理器名称(base_manager_name)
self.metaObjectsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaObjectsOptionPanel = wx.StaticBoxSizer(self.metaObjectsOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaObjectsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, "3、模型管理器名称(base_manager_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaObjectsOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认为objects。可用model.objects调出管理器。")
self.metaObjectsOptionPanel.Add(self.labelMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaObjectsOptionPanel.Add(self.metaObjectsOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)
# 数据表名(db_table)
# 在mysql中均小写,Oracle中数据库表名要用双引号括起来
self.metaDBTableOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDBTableOptionPanel = wx.StaticBoxSizer(self.metaDBTableOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDBTableOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, "4、数据表名(db_table)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDBTableOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认为应用程序名+模型名,全小写。如:app_model。")
self.metaDBTableOptionPanel.Add(self.labelMetaDBTableOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDBTableOptionPanel.Add(self.metaDBTableOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDBTableOption, 0, | |
Number of times to sample the frame
:param int random_seed:
Seed for the random number generator
:param float ci:
Confidence interval to calculate (mean +/- ci/2.0)
:param Callable func:
Function to calculate the ci around (default: np.mean)
:param int axis:
Which axis to sample over
:returns:
The upper and lower bounds on the CI
"""
n = data.shape[axis]
rs = np.random.RandomState(random_seed)
boot_dist = []
for i in range(n_boot):
resampler = rs.randint(0, n, n)
sample = data.take(resampler, axis=axis)
boot_dist.append(func(sample, axis=axis))
boot_dist = np.array(boot_dist)
return np.percentile(boot_dist, [50 - ci/2, 50 + ci/2], axis=0)
def get_histogram(data: np.ndarray,
                  bins: int,
                  range: Optional[Tuple[int]] = None,
                  kernel_smoothing: bool = True,
                  kernel_bandwidth: Optional[str] = None,
                  kernel_samples: int = 100) -> Tuple[np.ndarray]:
    """ Get a histogram and a kernel fit for some data

    :param ndarray data:
        The data to fit
    :param int bins:
        The number of bins to generate
    :param tuple[float] range:
        The range to fit bins to (argument to np.histogram)
    :param bool kernel_smoothing:
        If True, also generate a kernel-smoothed fit. If False, xkernel, ykernel are None
    :param str kernel_bandwidth:
        If not None, the method to use to estimate the kernel smoothed fit
        (passed to gaussian_kde as bw_method)
    :param int kernel_samples:
        The number of samples to draw for the kernel fit
    :returns:
        xbins, ybins, xkernel, ykernel
    """
    # NOTE: np.histogram returns (counts, edges); edges has len(counts)+1 entries
    bins_y, bins_x = np.histogram(data, bins=bins, range=range)

    # Estimate the kernel smoothed fit
    if kernel_smoothing:
        # FIX: scipy.integrate.simps was removed in SciPy 1.14 (deprecated
        # since 1.12); prefer the renamed simpson(), falling back for old SciPy.
        try:
            from scipy.integrate import simpson as _simpson
        except ImportError:  # SciPy < 1.6 only had simps
            from scipy.integrate import simps as _simpson

        kernel = gaussian_kde(data, bw_method=kernel_bandwidth)
        kernel_x = np.linspace(bins_x[0], bins_x[-1], kernel_samples)
        kernel_y = kernel(kernel_x)

        # Rescale the kernel curve so its area matches the histogram's area,
        # letting the two be overlaid on the same axes.
        bin_width = bins_x[1:] - bins_x[:-1]
        hist_area = np.sum(bin_width * bins_y)
        kernel_area = _simpson(kernel_y, x=kernel_x)
        kernel_y = kernel_y * hist_area / kernel_area
    else:
        kernel_x = kernel_y = None
    return bins_x, bins_y, kernel_x, kernel_y
# Plot functions
def add_lineplot(ax,
                 data: pd.DataFrame,
                 x: str, y: str,
                 hue: Optional[str] = None,
                 order: Optional[List[str]] = None,
                 hue_order: Optional[List[str]] = None,
                 palette: str = PALETTE,
                 savefile: Optional[pathlib.Path] = None,
                 label: Optional[str] = None,
                 err_style: str = 'band'):
    """ Add a seaborn-style lineplot with extra decorations

    :param Axes ax:
        The matplotlib axis to add the lineplot to
    :param DataFrame data:
        The data to add a lineplot for
    :param str x:
        The column to use for the categorical values
    :param str y:
        The column to use for the real values
    :param str hue:
        If not None, a column to split the data into one line per category
    :param list[str] order:
        If not None, the ordered categories along x (default: sorted unique values)
    :param list[str] hue_order:
        If not None, the ordered hue categories (default: sorted unique values)
    :param str palette:
        The palette to use
    :param Path savefile:
        If not None, save the figure data to this path (forced to .xlsx)
    :param str label:
        If not None, a label (or label suffix, with hue) for the legend
    :param str err_style:
        One of 'band' (shaded CI region) or 'bar' (error bars)
    :returns:
        The axis that was drawn on
    """
    bins = {}
    data = data.dropna()
    if order is None:
        order = np.sort(np.unique(data[x]))
    if hue is None:
        hue_order = [None]
    elif hue_order is None:
        hue_order = np.sort(np.unique(data[hue]))

    # Aggregate mean +/- bootstrap CI for every (category, hue) cell
    for cat in order:
        for hue_cat in hue_order:
            if hue_cat is None:
                mask = data[x] == cat
            else:
                mask = np.logical_and(data[x] == cat, data[hue] == hue_cat)

            # Handle missing categories: need >= 3 samples for a meaningful CI
            n_samples = np.sum(mask)
            if n_samples >= 3:
                catdata = data[mask]
                ydata = catdata[y].values
                ymean = np.mean(ydata)
                ylow, yhigh = bootstrap_ci(ydata)
            else:
                ymean = ylow = yhigh = np.nan

            # Single append path for both hue/no-hue cases (the hue column is
            # simply omitted when hue is None)
            bins.setdefault(x, []).append(cat)
            if hue is not None:
                bins.setdefault(hue, []).append(hue_cat)
            bins.setdefault(f'{y} Mean', []).append(ymean)
            bins.setdefault(f'{y} CI Low', []).append(ylow)
            bins.setdefault(f'{y} CI High', []).append(yhigh)
            bins.setdefault('Samples', []).append(n_samples)

    # Save the background data
    bins = pd.DataFrame(bins)
    if savefile is not None:
        if savefile.suffix != '.xlsx':
            savefile = savefile.parent / (savefile.stem + '.xlsx')
        bins.to_excel(str(savefile))

    # Now draw the plots, one line per hue category
    palette = colorwheel(palette, len(hue_order))
    for i, hue_cat in enumerate(hue_order):
        if hue_cat is None:
            xcoords = bins[x].values
            ymean = bins[f'{y} Mean'].values
            ylow = bins[f'{y} CI Low'].values
            yhigh = bins[f'{y} CI High'].values
            hue_label = label
        else:
            hue_bins = bins[bins[hue] == hue_cat]
            xcoords = hue_bins[x].values
            ymean = hue_bins[f'{y} Mean'].values
            ylow = hue_bins[f'{y} CI Low'].values
            yhigh = hue_bins[f'{y} CI High'].values
            if label is None:
                hue_label = hue_cat
            else:
                hue_label = f'{hue_cat} {label}'
        color = palette[i]
        if err_style in ('band', 'bands'):
            ax.fill_between(xcoords, ylow, yhigh, facecolor=color, alpha=0.5)
            ax.plot(xcoords, ymean, '-', color=color, label=hue_label)
        elif err_style in ('bar', 'bars'):
            ax.errorbar(xcoords, ymean, np.stack([ymean-ylow, yhigh-ymean], axis=0),
                        capsize=15, linewidth=3, color=color, label=hue_label)
        else:
            raise ValueError(f'Unknown error style: "{err_style}"')
    return ax
def add_histogram(ax,
                  data: np.ndarray,
                  xlabel: Optional[str] = None,
                  ylabel: str = 'Counts',
                  title: Optional[str] = None,
                  bins: int = 10,
                  draw_bars: bool = True,
                  bar_width: float = 0.7,
                  range: Optional[Tuple[float]] = None,
                  fit_dist: Optional[str] = None,
                  fit_dist_color: str = 'r',
                  kernel_smoothing: bool = True,
                  label_kernel_peaks: Optional[str] = None,
                  kernel_smoothing_color: str = 'c',
                  kernel_bandwidth: Optional[str] = None,
                  vlines: Optional[List[np.ndarray]] = None,
                  vline_colors: str = 'b'):
    """ Add a histogram plot

    Basic Usage:

    .. code-block:: python

        fig, ax = plt.subplots(1, 1)
        histogram(ax, np.random.rand(64, 64),
                  draw_bars=True,
                  kernel_smoothing=True,
                  fit_dist='gamma',
                  vlines=[0.25, 0.75])

    This will draw the histogram with a kernel smoothed fit, a gamma fit,
    and vertical lines at x coordinates 0.25 and 0.75.

    :param Axis ax:
        The axis to add the histogram to
    :param ndarray data:
        The data to make the histogram for (non-finite values are dropped)
    :param str xlabel:
        Label for the x axis
    :param str ylabel:
        Label for the y axis
    :param str title:
        Title for the axis (``n=<sample count>`` is appended)
    :param int bins:
        Number of bins in the histogram
    :param bool draw_bars:
        If True, draw the histogram bars
    :param float bar_width:
        The width of the bars to plot, as a fraction of the bin width
    :param tuple[float] range:
        The range to fit bins to (argument to np.histogram).
        NOTE: this parameter shadows the builtin ``range``; the name is
        kept for backwards compatibility with existing callers.
    :param str fit_dist:
        The name of a distribution to fit to the data.
        Currently only 'gamma' is supported; anything else raises KeyError.
    :param str fit_dist_color:
        The color of the fit dist line
    :param bool kernel_smoothing:
        If True, plot the kernel smoothed line over the bars
    :param str label_kernel_peaks:
        Any of 'min', 'max', 'both' (or True for both) to label extrema
        in the kernel smoothed curve
    :param str kernel_smoothing_color:
        The color of the kernel smoothed fit line
    :param str kernel_bandwidth:
        The method to calculate the kernel width with
    :param list vlines:
        x coords to draw vertical lines at
    :param list vline_colors:
        The color or list of colors for the vertical lines
    """
    # Estimate the histogram on finite samples only, so bin edges and
    # distribution fits are well defined
    data = data[np.isfinite(data)]
    xbins, hist, kernel_x, kernel_y = get_histogram(
        data, bins=bins, range=range,
        kernel_smoothing=kernel_smoothing,
        kernel_bandwidth=kernel_bandwidth)
    width = bar_width * (xbins[1] - xbins[0])
    center = (xbins[:-1] + xbins[1:])/2

    # Add bars for the histogram
    if draw_bars:
        ax.bar(center, hist, align='center', width=width)

    # Estimate the kernel smoothed fit
    if kernel_smoothing:
        # Add a kernel smoothed fit
        ax.plot(kernel_x, kernel_y, color=kernel_smoothing_color)
        if label_kernel_peaks in ('max', 'both', True):
            # Local maxima: sign of the first difference flips from + to -
            maxima = (np.diff(np.sign(np.diff(kernel_y))) < 0).nonzero()[0] + 1
            kx_maxima = kernel_x[maxima]
            ky_maxima = kernel_y[maxima]
            ax.plot(kx_maxima, ky_maxima, 'oc')
            for kx, ky in zip(kx_maxima, ky_maxima):
                # Label slightly above the peak, rounded to 2 significant digits
                ax.text(kx, ky*1.05, "{}".format(float("{:.2g}".format(kx))),
                        color="c", fontsize=12)
        if label_kernel_peaks in ('min', 'both', True):
            # Local minima: sign of the first difference flips from - to +
            minima = (np.diff(np.sign(np.diff(kernel_y))) > 0).nonzero()[0] + 1
            kx_minima = kernel_x[minima]
            ky_minima = kernel_y[minima]
            ax.plot(kx_minima, ky_minima, 'oy')
            for kx, ky in zip(kx_minima, ky_minima):
                # Label slightly below the trough
                ax.text(kx, ky*0.88, "{}".format(float("{:.2g}".format(kx))),
                        color="y", fontsize=12)

    # Fit a model distribution to the data
    if fit_dist is not None:
        opt_x = np.linspace(xbins[0], xbins[-1], 100)
        if fit_dist == 'gamma':
            # Small offset keeps the gamma fit from failing on zero samples
            fit_alpha, fit_loc, fit_beta = gamma.fit(data + 1e-5)
            # BUGFIX: the old code did ``opt_y = data = gamma.pdf(...)``,
            # which rebound ``data`` to the 100-point pdf curve and
            # corrupted the sample count shown in the title below.
            opt_y = gamma.pdf(opt_x, fit_alpha, loc=fit_loc, scale=fit_beta) * data.shape[0]
        else:
            raise KeyError(f'Unknown fit distribution: {fit_dist}')
        ax.plot(opt_x, opt_y, fit_dist_color)

    # Add vertical marker lines
    if vlines is None:
        vlines = []
    if isinstance(vline_colors, (str, tuple)):
        # A single color applies to every line
        vline_colors = [vline_colors for _ in vlines]
    if len(vlines) != len(vline_colors):
        raise ValueError(f'Number of colors and lines needs to match: {vlines} vs {vline_colors}')
    ymin, ymax = ax.get_ylim()
    for vline, vline_color in zip(vlines, vline_colors):
        ax.vlines(vline, ymin, ymax, colors=vline_color)

    # Label the axes
    if xlabel not in (None, ''):
        ax.set_xlabel(xlabel)
    if ylabel not in (None, ''):
        ax.set_ylabel(ylabel)
    if title not in (None, ''):
        ax.set_title(f'{title} (n={data.shape[0]})')
    else:
        ax.set_title(f'n = {data.shape[0]}')
# Complete Plots
def plot_3d_sphere_cloud(centers: List[Tuple[np.ndarray]],
colors: List[str] = None,
cmap: str = 'inferno',
cvalues: Optional[List[np.ndarray]] = None,
vmin: Optional[float] = None,
vmax: Optional[float] = None,
radii: List[float] = 1.0,
title: Optional[str] = None,
marker: str = 'o',
markersize: float = 10,
figsize: Tuple[int] = (16, 16),
outfile: Optional[pathlib.Path] = None,
add_colorbar: bool = False):
""" Plot the raw points we sampled
:param list[tuple[ndarray]] points:
A list of x, y, z tuples for each population
:param list[str] colors:
A list of colors for each population
:param str title:
The title for the plot
:param Path outfile:
The path to write the output | |
self.stanpAccountLabel.setObjectName("stanpAccountLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.stanpAccountLabel)
self.sendMailLabel = QtWidgets.QLabel(self.formLayoutWidget_5)
self.sendMailLabel.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 12pt \"MS Shell Dlg 2\";")
self.sendMailLabel.setObjectName("sendMailLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.sendMailLabel)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.formLayout.setItem(1, QtWidgets.QFormLayout.FieldRole, spacerItem8)
spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.formLayout.setItem(5, QtWidgets.QFormLayout.FieldRole, spacerItem9)
spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.formLayout.setItem(3, QtWidgets.QFormLayout.FieldRole, spacerItem10)
self.tableWidget_4 = QtWidgets.QTableWidget(self.but_web_controler)
self.tableWidget_4.setGeometry(QtCore.QRect(10, 280, 661, 151))
self.tableWidget_4.setStyleSheet("")
self.tableWidget_4.setObjectName("tableWidget_4")
self.tableWidget_4.setColumnCount(9)
self.tableWidget_4.setRowCount(3)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
item.setBackground(QtGui.QColor(135, 135, 140))
self.tableWidget_4.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(1, 7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(2, 7, item)
self.butSubImportEmailSend = QtWidgets.QPushButton(self.but_web_controler)
self.butSubImportEmailSend.setGeometry(QtCore.QRect(590, 400, 75, 23))
self.butSubImportEmailSend.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"selection-background-color: rgb(77, 145, 215);\n"
"font: 75 9pt \"MS Shell Dlg 2\";\n"
"selection-background-color: rgb(85, 85, 255);\n"
"color: rgb(255, 255, 255);")
self.butSubImportEmailSend.setObjectName("butSubImportEmailSend")
self.widget = QtWidgets.QWidget(self.but_web_controler)
self.widget.setGeometry(QtCore.QRect(370, 40, 221, 201))
self.widget.setObjectName("widget")
self.ButWebFilterFIllData_2 = QtWidgets.QPushButton(self.widget)
self.ButWebFilterFIllData_2.setGeometry(QtCore.QRect(0, 10, 221, 23))
self.ButWebFilterFIllData_2.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"color: rgb(255, 255, 255);")
self.ButWebFilterFIllData_2.setObjectName("ButWebFilterFIllData_2")
self.ButWebMailSend = QtWidgets.QPushButton(self.widget)
self.ButWebMailSend.setGeometry(QtCore.QRect(0, 100, 221, 23))
self.ButWebMailSend.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"color: rgb(255, 255, 255);")
self.ButWebMailSend.setObjectName("ButWebMailSend")
self.interTheWebSiteLineEdit_4 = QtWidgets.QLineEdit(self.but_web_controler)
self.interTheWebSiteLineEdit_4.setGeometry(QtCore.QRect(380, 80, 201, 20))
self.interTheWebSiteLineEdit_4.setStyleSheet("")
self.interTheWebSiteLineEdit_4.setText("")
self.interTheWebSiteLineEdit_4.setObjectName("interTheWebSiteLineEdit_4")
self.but_web_controler_2.addTab(self.but_web_controler, "")
self.but_web_next = QtWidgets.QPushButton(self.tabWebControl)
self.but_web_next.setGeometry(QtCore.QRect(400, 10, 131, 23))
self.but_web_next.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"selection-background-color: rgb(77, 145, 215);\n"
"font: 75 9pt \"MS Shell Dlg 2\";\n"
"selection-background-color: rgb(85, 85, 255);\n"
"color: rgb(255, 255, 255);")
self.but_web_next.setObjectName("but_web_next")
self.but_web_mail = QtWidgets.QPushButton(self.tabWebControl)
self.but_web_mail.setGeometry(QtCore.QRect(260, 10, 131, 23))
self.but_web_mail.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"selection-background-color: rgb(77, 145, 215);\n"
"font: 75 9pt \"MS Shell Dlg 2\";\n"
"selection-background-color: rgb(85, 85, 255);\n"
"color: rgb(255, 255, 255);")
self.but_web_mail.setObjectName("but_web_mail")
self.but_web_filler = QtWidgets.QPushButton(self.tabWebControl)
self.but_web_filler.setGeometry(QtCore.QRect(120, 10, 131, 23))
self.but_web_filler.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"selection-background-color: rgb(77, 145, 215);\n"
"font: 75 9pt \"MS Shell Dlg 2\";\n"
"selection-background-color: rgb(85, 85, 255);\n"
"color: rgb(255, 255, 255);")
self.but_web_filler.setObjectName("but_web_filler")
self.tabWidget_left.addTab(self.tabWebControl, "")
self.tab_controlBanel = QtWidgets.QWidget()
self.tab_controlBanel.setStyleSheet("background-image: url(:/Icons/tab_theme.png);")
self.tab_controlBanel.setObjectName("tab_controlBanel")
self.frame_6 = QtWidgets.QFrame(self.tab_controlBanel)
self.frame_6.setGeometry(QtCore.QRect(110, 130, 491, 281))
self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_6.setObjectName("frame_6")
self.pushButton = QtWidgets.QPushButton(self.frame_6)
self.pushButton.setGeometry(QtCore.QRect(140, 100, 101, 23))
self.pushButton.setObjectName("pushButton")
self.butRestart = QtWidgets.QPushButton(self.tab_controlBanel)
self.butRestart.setGeometry(QtCore.QRect(510, 20, 71, 23))
self.butRestart.setStyleSheet("background-color: rgb(161, 161, 171);\n"
"selection-background-color: rgb(77, 145, 215);\n"
"font: 75 9pt \"MS Shell Dlg 2\";\n"
"selection-background-color: rgb(85, 85, 255);\n"
"color: rgb(255, 255, 255);")
self.butRestart.setObjectName("butRestart")
self.tabWidget_left.addTab(self.tab_controlBanel, "")
self.button_config = QtWidgets.QPushButton(self.centralwidget)
self.button_config.setGeometry(QtCore.QRect(93, 0, 65, 31))
self.button_config.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"background-image: url(:/Icons/buttons/config.png);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"")
self.button_config.setText("")
self.button_config.setObjectName("button_config")
self.button_help = QtWidgets.QPushButton(self.centralwidget)
self.button_help.setGeometry(QtCore.QRect(173, -2, 66, 30))
self.button_help.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"background-image: url(:/Icons/buttons/button_help.png);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"")
self.button_help.setText("")
self.button_help.setObjectName("button_help")
self.button_button1 = QtWidgets.QPushButton(self.centralwidget)
self.button_button1.setGeometry(QtCore.QRect(820, 40, 75, 23))
self.button_button1.setStyleSheet("background-color: rgb(161, 161, 171,0%);")
self.button_button1.setText("")
self.button_button1.setObjectName("button_button1")
self.button_button2 = QtWidgets.QPushButton(self.centralwidget)
self.button_button2.setGeometry(QtCore.QRect(910, 40, 75, 23))
self.button_button2.setStyleSheet("background-color: rgb(161, 161, 171,0%);")
self.button_button2.setText("")
self.button_button2.setObjectName("button_button2")
self.button_button3 = QtWidgets.QPushButton(self.centralwidget)
self.button_button3.setGeometry(QtCore.QRect(990, 40, 75, 23))
self.button_button3.setStyleSheet("background-color: rgb(161, 161, 171,0%);")
self.button_button3.setText("")
self.button_button3.setObjectName("button_button3")
self.label_meanBackground = QtWidgets.QLabel(self.centralwidget)
self.label_meanBackground.setGeometry(QtCore.QRect(20, -140, 1213, 771))
self.label_meanBackground.setStyleSheet("")
self.label_meanBackground.setText("")
self.label_meanBackground.setPixmap(QtGui.QPixmap(":/photos/main_theme2.png"))
self.label_meanBackground.setObjectName("label_meanBackground")
self.button_AI_leader = QtWidgets.QPushButton(self.centralwidget)
self.button_AI_leader.setGeometry(QtCore.QRect(145, 35, 75, 23))
self.button_AI_leader.setFocusPolicy(QtCore.Qt.ClickFocus)
self.button_AI_leader.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.button_AI_leader.setCheckable(True)
self.button_AI_leader.setObjectName("button_AI_leader")
self.buttonKnowledge = QtWidgets.QPushButton(self.centralwidget)
self.buttonKnowledge.setGeometry(QtCore.QRect(232, 35, 75, 23))
self.buttonKnowledge.setMouseTracking(True)
self.buttonKnowledge.setFocusPolicy(QtCore.Qt.TabFocus)
self.buttonKnowledge.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.buttonKnowledge.setCheckable(True)
self.buttonKnowledge.setChecked(False)
self.buttonKnowledge.setObjectName("buttonKnowledge")
self.button_analysis = QtWidgets.QPushButton(self.centralwidget)
self.button_analysis.setGeometry(QtCore.QRect(323, 34, 75, 23))
self.button_analysis.setFocusPolicy(QtCore.Qt.WheelFocus)
self.button_analysis.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.button_analysis.setCheckable(True)
self.button_analysis.setObjectName("button_analysis")
self.buttonFiles = QtWidgets.QPushButton(self.centralwidget)
self.buttonFiles.setGeometry(QtCore.QRect(406, 34, 75, 23))
self.buttonFiles.setFocusPolicy(QtCore.Qt.ClickFocus)
self.buttonFiles.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.buttonFiles.setCheckable(True)
self.buttonFiles.setObjectName("buttonFiles")
self.buttonWeb = QtWidgets.QPushButton(self.centralwidget)
self.buttonWeb.setGeometry(QtCore.QRect(478, 34, 75, 23))
self.buttonWeb.setFocusPolicy(QtCore.Qt.ClickFocus)
self.buttonWeb.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.buttonWeb.setCheckable(True)
self.buttonWeb.setObjectName("buttonWeb")
self.button_reporting = QtWidgets.QPushButton(self.centralwidget)
self.button_reporting.setGeometry(QtCore.QRect(557, 35, 91, 23))
self.button_reporting.setFocusPolicy(QtCore.Qt.ClickFocus)
self.button_reporting.setStyleSheet("background-color: rgb(161, 161, 171,0%);\n"
"selection-color: rgb(255, 255, 255);\n"
"gridline-color: rgb(255, 255, 255,0%);\n"
"selection-background-color: rgb(236, 236, 236);\n"
"color: rgb(172, 172, 175);\n"
"font: 100 11pt \"MS Shell Dlg 2\";")
self.button_reporting.setCheckable(True)
self.button_reporting.setObjectName("button_reporting")
self.tabMaps = QtWidgets.QTabWidget(self.centralwidget)
self.tabMaps.setGeometry(QtCore.QRect(810, 10, 411, 571))
self.tabMaps.setTabletTracking(False)
self.tabMaps.setStyleSheet("QTabBar::tab { height: 0px; width: 100px; };\n"
"border-color: rgb(255, 255, 255,0%) ;\n"
"")
self.tabMaps.setTabsClosable(False)
self.tabMaps.setTabBarAutoHide(False)
self.tabMaps.setProperty("set_show_tabs()", False)
self.tabMaps.setObjectName("tabMaps")
self.UK_kingdom = QtWidgets.QWidget()
self.UK_kingdom.setObjectName("UK_kingdom")
self.groupBox_2 = QtWidgets.QGroupBox(self.UK_kingdom)
self.groupBox_2.setGeometry(QtCore.QRect(10, 10, 391, 561))
self.groupBox_2.setObjectName("groupBox_2")
self.table_web_itemsDetails = QtWidgets.QTableWidget(self.groupBox_2)
self.table_web_itemsDetails.setGeometry(QtCore.QRect(20, 30, 361, 511))
self.table_web_itemsDetails.setObjectName("table_web_itemsDetails")
self.table_web_itemsDetails.setColumnCount(3)
self.table_web_itemsDetails.setRowCount(24)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(12, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(13, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(14, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(15, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(16, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(17, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(18, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(19, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(20, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(21, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(22, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setVerticalHeaderItem(23, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(0, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(1, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(2, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(3, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(4, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(4, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(5, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(5, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(5, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(6, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(6, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(6, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(7, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(7, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(7, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(8, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(8, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(8, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(9, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(9, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(9, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(10, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(10, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(10, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(11, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(11, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(11, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(12, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(12, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(12, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(13, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(13, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(13, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(14, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(14, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(14, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(15, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(15, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(15, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(16, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(16, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(16, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(17, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(17, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(17, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(18, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(18, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(18, 2, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(19, 0, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(19, 1, item)
item = QtWidgets.QTableWidgetItem()
self.table_web_itemsDetails.setItem(19, 2, item)
| |
# File: zoof/zf_codegen.py (from almarklein/pywasm)
"""
Code generator for the experimental WIP Zoof lang. It takes the AST produced
by the parser and converts it to WASM.
Tokenization: source code -> tokens
parsing: tokens -> ast-tree
modularize: ast-tree -> Module context with multiple function contexts that have ast-trees
optimization: module context -> module context with optimized ast-trees
code generation: module context -> wasm module
"""
import wasmfun as wf
from zf_tokenizer import tokenize
from zf_parser import Expr, parse
from zf_std import STD
# todo: eventually this should produce WASM more directly (without first going
# through wasmtools), at least for the instructions.
def compile(code):
    """ Compile Zoof code (in the form of a string, a list of tokens, or an
    Expr object) to a WASM module.

    :param code: source string, token list, or parsed Expr block.
    :returns: the compiled wasm module object.
    :raises TypeError: if ``code`` is none of the accepted types.

    NOTE: this deliberately shadows the builtin ``compile`` within this
    module; the name is part of the public API.
    """
    # Each stage falls through to the next: str -> tokens -> Expr -> wasm
    if isinstance(code, str):
        code = tokenize(code)
    if isinstance(code, list):
        code = parse(code)
    if isinstance(code, Expr):
        return generate_code(code)
    else:
        # BUGFIX: message previously said "Exp." instead of "Expr."
        raise TypeError('compile() needs code as string, list of tokens, or Expr.')
def generate_code(ast):
    """ Lower a parsed AST (a block Expr) to a WASM module object. """
    assert isinstance(ast, Expr)
    assert ast.kind == 'block'
    # Build the module context; its constructor performs an initial pass
    # that collects all function definitions and assigns their indices.
    module_ctx = ModuleContext(ast)
    # Run the inlining optimization on the module body and on each of its
    # directly-defined functions
    module_ctx.optimize_inline()
    for fn_ctx in module_ctx._functions.values():
        fn_ctx.optimize_inline()
    # Emit instructions for every context, then assemble the wasm module
    for ctx in module_ctx.all_functions:
        ctx.compile()
    return module_ctx.to_wasm()
##
# todo: can we add line number info to expressions?
class ZoofCompilerError(SyntaxError):
    """ Error raised when Zoof code cannot be compiled to WASM. """
    pass
class BaseContext:
    """ A context is a wrapper for certain nodes in the AST tree that represent
    an execution context or scope, such as modules and functions. They are
    placeholders for information about the scope, such as number of
    instructions, used variable names, and types, which can be used during
    optimization.
    """

    def __init__(self, body):
        assert isinstance(body, Expr) and body.kind == 'block'
        self._body = body
        self._parent = None          # enclosing context, set by subclasses
        self._expressions = []
        self._functions = {}         # name -> FunctionContext
        self.instructions = []       # generated wasm instructions
        self.names = {}              # variable name -> local index
        self._name_counter = 0
        self._block_stack = []       # nesting of 'if'/'loop' blocks

    def name_idx(self, name):
        """ Return the local index for the given variable name, allocating
        a new index on first use.
        """
        if name not in self.names:
            self.names[name] = self._name_counter
            self._name_counter += 1
        return self.names[name]

    def push_block(self, kind):
        assert kind in ('if', 'loop')
        self._block_stack.append(kind)

    def pop_block(self, kind):
        # Blocks must be popped in strict LIFO order with a matching kind
        assert self._block_stack.pop(-1) == kind

    def get_block_level(self):
        """ Return the nesting distance from the innermost block to the
        nearest enclosing loop, or None when not inside a loop.
        """
        for i, kind in enumerate(reversed(self._block_stack)):
            # BUGFIX: was ``kind in ('loop')`` -- ``('loop')`` is just the
            # string 'loop', so that was a substring test, not a tuple
            # membership test.
            if kind == 'loop':
                return i

    def add_function(self, ctx):
        assert isinstance(ctx, FunctionContext)
        self._functions[ctx._name] = ctx

    def _get_function(self, name):
        """ Look up a function context by name, walking up parent scopes. """
        fun = self._functions.get(name, None)
        if fun is None and self._parent is not None:
            fun = self._parent._get_function(name)
        return fun

    def _collect_functions(self, expr):
        """ Walk the ast in search of functions, which we collect into
        a FunctionContext.
        """
        # todo: this pass should also resolve imports and globals
        # todo: count expressions here?
        if expr.kind == 'func':
            raise NotImplementedError()
        elif expr.kind == 'return':
            # NOTE(review): _ret_count is only initialized on
            # FunctionContext, so a module-level return would raise
            # AttributeError here -- confirm whether that is intended.
            self._ret_count += 1  # todo: also set type?
        elif expr.kind == 'block':
            new_expressions = []
            for sub_expr in expr.args:
                if sub_expr.kind == 'func':
                    # todo: the function might use nonlocals, which might be globals
                    func_ctx = FunctionContext(self, sub_expr)
                    self.add_function(func_ctx)
                else:
                    new_expressions.append(sub_expr)
                    self._collect_functions(sub_expr)
            # Function definitions are lifted out of the block body
            expr.args = new_expressions
        else:
            for sub_expr in expr.args:
                self._collect_functions(sub_expr)

    def optimize_inline(self):
        """ Inline eligible function calls inside this context's body. """
        self._optimize_inline(self._body)

    def _optimize_inline(self, expr):
        for i, subexpr in enumerate(expr.args):
            if subexpr.kind == 'call':
                funcname = subexpr.args[0].value
                fun = self._get_function(funcname)
                if fun is not None:
                    # Only inline shallow functions with exactly one return
                    if fun._depth < 16 and fun._ret_count == 1:
                        call_args = subexpr.args[1:]  # should these be wrapped in a tuple?
                        func_args = fun._expr.args[0].args  # identifiers
                        # "bind" call to function signature
                        replacements = {}
                        t = subexpr.token
                        block = Expr('inline', t)
                        for call_arg, func_arg in zip(call_args, func_args):
                            assert func_arg.kind == 'identifier'
                            if call_arg.kind == 'identifier':
                                # Identifier args become a name substitution
                                replacements[func_arg.value] = call_arg.value
                            else:
                                # Other args are assigned to renamed locals
                                block.args.append(Expr('assign', t, func_arg.copy('$'), call_arg))
                        block.args.extend(fun._body.copy('$', replacements).args)
                        # Swap the call for the inlined block, in place
                        # (pop+insert keeps the list length stable, so the
                        # enumerate above stays valid)
                        expr.args.pop(i)
                        expr.args.insert(i, block)
            self._optimize_inline(subexpr)

    def compile(self):
        """ Compile this context's body into self.instructions. """
        for expr in self._body.args:
            _compile_expr(expr, self, False)
class ModuleContext(BaseContext):
    """ Module-scope context: tracks globals, imported (host) functions,
    and every function context defined anywhere in the module.
    """

    def __init__(self, expr):
        super().__init__(expr)
        self._imported_globals = []
        self._globals = []
        # Host functions occupy the first wasm function indices
        self._imported_functions = dict(print=0, perf_counter=1)
        self._collect_functions(self._body)
        self._collect_all_functions_and_assign_ids()

    def _collect_all_functions_and_assign_ids(self):
        """ Breadth-first walk over nested function contexts, assigning
        each one a wasm function index (following the imported functions).
        """
        # todo: mmm, the wf.Module class can resolve names to indices for us
        # -> or is it better to do it here?
        pending = [self]
        next_idx = len(self._imported_functions)
        collected = [self]
        while pending:
            current = pending.pop(0)
            for child in current._functions.values():
                child._idx = next_idx
                next_idx += 1
                pending.append(child)
                collected.append(child)
        self.all_functions = collected

    def to_wasm(self):
        """ Create wasm Module object.
        """
        # The module body itself becomes the $main function; every named
        # variable gets one f64 local.
        main_locals = ['f64' for _ in self.names]
        main_func = wf.Function('$main', [], [], main_locals, self.instructions)
        # Imported funcs first, then main, then all nested functions
        funcs = [
            wf.ImportedFuncion('print_ln', ['f64'], [], 'js', 'print_ln'),
            wf.ImportedFuncion('perf_counter', [], ['f64'], 'js', 'perf_counter'),
            main_func,
        ]
        funcs.extend(ctx.to_wasm() for ctx in self.all_functions[1:])
        # Compose module
        return wf.Module(*funcs)
class FunctionContext(BaseContext):
    """ A context keeps track of things while we walk the AST tree.
    Each context represents a block-expression, e.g. the main scope or
    a function definition.
    """
    def __init__(self, parent, expr):
        assert isinstance(parent, BaseContext)
        assert expr.kind == 'func'
        # expr.args[0] holds the parameter identifiers, expr.args[1] the body.
        super().__init__(expr.args[1])
        self._expr = expr
        self._parent = parent # parent context
        self._name = expr.token.text # not expr.value as identifier
        # Init index, is set by the module
        self._idx = -1
        # Init return type
        self._ret_types = None
        self._ret_count = 0
        # Process args
        self._arg_types = []
        for arg in expr.args[0].args:
            self.name_idx(arg.value)
            self._arg_types.append('f64')  # every value is f64 for now
        self._collect_functions(self._body)
        # Size heuristic (node count of the body minus the parameter nodes);
        # used by the inliner's "_depth < 16" cutoff.
        self._depth = self._count_depth(self._body.args) - len(self._arg_types)
    def set_return_type(self, ret_types):
        """Record this function's return types; every call site must agree."""
        # todo: we should check the rt of every branch
        # todo: awful hack
        if self._name in ('eq', 'lt', 'le', 'gt', 'ge'):
            # Hard-coded special case: these comparison helpers yield wasm i32.
            self._ret_types = ('i32', )
            return
        rt = tuple(ret_types)
        if self._ret_types is not None:
            # All return sites must produce the same type tuple.
            assert rt == self._ret_types
        else:
            self._ret_types = rt
    def to_wasm(self):
        """ Create wasm Function object.
        """
        arg_types = self._arg_types
        ret_types = self._ret_types or []
        # One 'f64' local per tracked name.
        # NOTE(review): self.names also contains the parameters — confirm that
        # wf.Function expects params to be repeated in the locals list.
        locals = ['f64' for i in self.names]
        return wf.Function(self._name, arg_types, ret_types, locals, self.instructions)
    def _count_depth(self, exprs):
        """Return the total number of Expr nodes in the given subtrees."""
        count = 0
        for expr in exprs:
            assert isinstance(expr, Expr)
            count += 1
            count += self._count_depth(expr.args)
        return count
def _compile_expr(expr, ctx, push_stack=True, drop_return=False):
""" Compile a single expression.
"""
if expr.kind == 'assign':
# Get name index to store value
assert expr.args[0].kind == 'identifier'
name = expr.args[0].value
name_idx = ctx.name_idx(name)
# Compute value
_compile_expr(expr.args[1], ctx, True)
# Store it
if push_stack:
ctx.instructions.append(('tee_local', name_idx))
else:
ctx.instructions.append(('set_local', name_idx))
elif expr.kind == 'call':
_compile_call(expr, ctx, push_stack)
elif expr.kind == 'identifier':
name = expr.value
ctx.instructions.append(('get_local', ctx.names[name]))
elif expr.kind == 'literal':
value = expr.value
assert isinstance(value, float) # todo: also str/int?
ctx.instructions.append(('f64.const', value))
elif expr.kind == 'if':
# Run test
_compile_expr(expr.args[0], ctx, True)
# Branch + body
ctx.push_block('if')
if push_stack:
ctx.instructions.append(('if', 'f64'))
else:
ctx.instructions.append(('if', 'emptyblock'))
assert len(expr.args) in (2, 3)
if push_stack:
if len(expr.args) == 2:
raise ZoofCompilerError('A result-producing if-expression needs an else clause.')
if len(expr.args[1].args) == 0 or len(expr.args[2].args) == 0:
raise ZoofCompilerError('A result-producing if-expression needs nonempty body and else clauses')
for e in expr.args[1].args[:-1]:
_compile_expr(e, ctx, False)
_compile_expr(expr.args[1].args[-1], ctx, True)
ctx.instructions.append(('else', ))
for e in expr.args[2].args[:-1]:
_compile_expr(e, ctx, False)
_compile_expr(expr.args[2].args[-1], ctx, True)
else:
for e in expr.args[1].args:
_compile_expr(e, ctx, False)
if len(expr.args) == 3:
ctx.instructions.append(('else', ))
if expr.args[2].kind == 'block':
for e in expr.args[2].args:
_compile_expr(e, ctx, False)
else:
_compile_expr(expr.args[2], ctx, False)
ctx.instructions.append(('end', ))
ctx.pop_block('if')
elif expr.kind == 'loop':
# Init blocks - (outer block for break)
ctx.push_block('loop')
for i in [('block', 'emptyblock'), ('loop', 'emptyblock')]:
ctx.instructions.append(i)
if len(expr.args) == 1:
# loop-inf
for e in expr.args[1].args:
_compile_expr(e, ctx, False)
elif len(expr.args) == 2:
# loop-while
_compile_expr(expr.args[0], ctx, True) # test
ctx.instructions.append('i32.eqz') # negate
ctx.instructions.append(('br_if', 1))
for e in expr.args[1].args:
_compile_expr(e, ctx, False)
ctx.instructions.append(('br', 0)) # loop
elif len(expr.args) == 3:
# loop-in
raise NotImplementedError()
else:
assert False, 'Unexpected number of args in loop expression.'
# Close loop
for i in [('end'), ('end')]:
ctx.instructions.append(i)
ctx.pop_block('loop')
elif expr.kind == 'continue':
| |
return Credentials(user['AccessKeyId'], user['SecretAccessKey'])
def get_deployment_user_credentials(self, deployment_name):
user = self.context.get(test_constant.ATTR_USERS, {}).get(self.get_deployment_user_name(deployment_name))
if not user:
self.create_deployment_user(deployment_name)
user = self.context[test_constant.ATTR_USERS].get(self.get_deployment_user_name(deployment_name))
return Credentials(user['AccessKeyId'], user['SecretAccessKey'])
    def lmbr_aws(self, *args, **kwargs):
        """Run the lmbr_aws CLI in-process with the given arguments.

        Keyword options:
            expect_failure: assert the CLI exits non-zero.
            ignore_failure: return the exit code without asserting on it.
            project_role / deployment_role: role name to assume; access keys
                of the matching test user are appended to the command line.
        Captured output is stored in self.lmbr_aws_stdout / lmbr_aws_stderr.
        Returns the CLI exit code.
        """
        expect_failure = kwargs.get('expect_failure', False)
        ignore_failure = kwargs.get('ignore_failure', False)
        assumed_role = kwargs.get('project_role', None)
        if assumed_role:
            credentials = self.get_project_user_credentials()
        else:
            assumed_role = kwargs.get('deployment_role', None)
            if assumed_role:
                deployment_name = self.find_arg(args, '--deployment', self.TEST_DEPLOYMENT_NAME)
                credentials = self.get_deployment_user_credentials(deployment_name)
        # The CLI reads sys.argv; rebuild it for this in-process invocation.
        sys.argv = ['']
        sys.argv.extend(args)
        sys.argv.extend(['--root-directory', self.ROOT_DIR])
        sys.argv.extend(['--no-prompt'])
        if assumed_role:
            sys.argv.extend(['--assume-role', assumed_role, '--aws-access-key', credentials.access_key, '--aws-secret-key', credentials.secret_key])
        else:
            sys.argv.extend(self.lmbr_aws_credentials_args)
        # Echo the equivalent command line for easier debugging of test logs.
        display = 'lmbr_aws'
        for arg in sys.argv[1:]:
            display = display + ' ' + arg
        print '\n\n{}\n'.format(display)
        with self.captured_output() as (out, err):
            res = resource_manager.cli.main()
        self.lmbr_aws_stdout = out.getvalue()
        self.lmbr_aws_stderr = err.getvalue()
        if ignore_failure:
            return res
        if expect_failure:
            self.assertNotEqual(res, 0)
        else:
            self.assertEqual(res, 0)
        return res
def load_local_project_settings(self):
if os.path.exists(self.LOCAL_PROJECT_SETTINGS_FILE_PATH):
with open(self.LOCAL_PROJECT_SETTINGS_FILE_PATH, 'r') as f:
return json.load(f)
else:
return {}
    def load_cloud_project_settings(self):
        """Download project-settings.json from the project's Configuration
        bucket; returns {} when no configuration bucket can be resolved.
        """
        project_stack_arn = self.get_project_stack_arn()
        config_bucket_id = self.get_stack_resource_physical_id(project_stack_arn, 'Configuration')
        return json.load(self.aws_s3.get_object(Bucket=config_bucket_id, Key="project-settings.json")["Body"]) if config_bucket_id else {}
    def refresh_stack_description(self, arn):
        """Re-fetch and cache the CloudFormation description of the given stack."""
        self.stack_descriptions[arn] = self.aws_cloudformation.describe_stacks(StackName=arn)['Stacks'][0]
def get_stack_description(self, arn):
description = self.stack_descriptions.get(arn)
if not description:
self.refresh_stack_description(arn)
description = self.stack_descriptions.get(arn)
return description
def get_stack_output(self, arn, output_name):
description = self.get_stack_description(arn)
outputs = description.get('Outputs', [])
for output in outputs:
if output.get('OutputKey') == output_name:
return output.get('OutputValue')
return None
    def refresh_stack_resources(self, arn):
        """Re-fetch and cache the full resource listing of the given stack."""
        self.stack_resource_descriptions[arn] = self.aws_cloudformation.describe_stack_resources(StackName=arn)
    def get_stack_resource(self, stack_arn, logical_resource_id):
        """Return the stack resource with the given logical id, using (and
        filling) the per-stack cache; fails the test when it is not found.
        """
        describe_stack_resources_result = self.stack_resource_descriptions.get(stack_arn, None)
        if describe_stack_resources_result is None:
            # Cache miss: fetch once and remember the whole listing.
            describe_stack_resources_result = self.aws_cloudformation.describe_stack_resources(StackName=stack_arn)
            self.stack_resource_descriptions[stack_arn] = describe_stack_resources_result
        for resource in describe_stack_resources_result['StackResources']:
            if resource['LogicalResourceId'] == logical_resource_id:
                return resource
        self.fail('Resource {} not found in stack {}'.format(logical_resource_id, stack_arn))
def get_stack_resource_physical_id(self, stack_arn, logical_resource_id):
resource = self.get_stack_resource(stack_arn, logical_resource_id)
return resource['PhysicalResourceId'] if resource else {}
def get_stack_resource_arn(self, stack_arn, logical_resource_id):
resource = self.get_stack_resource(stack_arn, logical_resource_id)
if resource['ResourceType'] == 'AWS::CloudFormation::Stack':
return resource['PhysicalResourceId']
else:
return self.make_resource_arn(stack_arn, resource['ResourceType'], resource['PhysicalResourceId'])
RESOURCE_ARN_PATTERNS = {
'AWS::DynamoDB::Table': 'arn:aws:dynamodb:{region}:{account_id}:table/{resource_name}',
'AWS::Lambda::Function': 'arn:aws:lambda:{region}:{account_id}:function:{resource_name}',
'AWS::SQS::Queue': 'arn:aws:sqs:{region}:{account_id}:{resource_name}',
'AWS::SNS::Topic': 'arn:aws:sns:{region}:{account_id}:{resource_name}',
'AWS::S3::Bucket': 'arn:aws:s3:::{resource_name}',
'AWS::IAM::ManagedPolicy': '{resource_name}'
}
def make_resource_arn(self, stack_arn, resource_type, resource_name):
if resource_type == 'AWS::IAM::Role':
res = self.aws_iam.get_role(RoleName=resource_name)
return res['Role']['Arn']
else:
pattern = self.RESOURCE_ARN_PATTERNS.get(resource_type, None)
if pattern is None:
raise RuntimeError('Unsupported resource type {} for resource {}.'.format(resource_type, resource_name))
return pattern.format(
region=self.get_region_from_arn(stack_arn),
account_id=self.get_account_id_from_arn(stack_arn),
resource_name=resource_name)
def get_stack_name_from_arn(self, arn):
# Stack ARN format: arn:aws:cloudformation:{region}:{account}:stack/{name}/{guid}
if arn is None: return None
return arn.split('/')[1]
def get_region_from_arn(self, arn):
# Stack ARN format: arn:aws:cloudformation:{region}:{account}:stack/{name}/{guid}
if arn is None: return None
return arn.split(':')[3]
def get_account_id_from_arn(self, arn):
# Stack ARN format: arn:aws:cloudformation:{region}:{account}:stack/{name}/{guid}
if arn is None: return None
return arn.split(':')[4]
def get_role_name_from_arn(self, arn):
# Role ARN format: arn:aws:iam::{account_id}:role/{resource_name}
if arn is None: return None
return arn[arn.rfind('/')+1:]
def get_project_stack_arn(self):
settings = self.load_local_project_settings()
return settings.get('ProjectStackId') if settings else None
def get_resource_group_stack_arn(self, deployment_name, resource_group_name):
deployment_stack_arn = self.get_deployment_stack_arn(deployment_name)
return self.get_stack_resource_arn(deployment_stack_arn, resource_group_name)
def get_deployment_stack_arn(self, deployment_name):
deployment = self.get_deployment_settings(deployment_name)
return deployment['DeploymentStackId'] if 'DeploymentStackId' in deployment else None
def get_deployment_access_stack_arn(self, deployment_name):
deployment = self.get_deployment_settings(deployment_name)
return deployment['DeploymentAccessStackId'] if 'DeploymentAccessStackId' in deployment else None
def get_deployment_settings(self, deployment_name):
settings = self.load_cloud_project_settings()
if not settings:
return {}
deployments = settings['deployment']
if not deployments or deployment_name not in deployments:
return {}
return deployments[deployment_name]
    def verify_s3_object_does_not_exist(self, bucket, key):
        """Assert that head_object reports 404 (not found) for bucket/key."""
        try:
            res = self.aws_s3.head_object(Bucket=bucket, Key=key)
            self.fail("s3 bucket {} object {} was not deleted. head_object returned {}".format(bucket, key, res))
        except ClientError as e:
            # NOTE(review): assertEquals is a deprecated alias of assertEqual.
            self.assertEquals(e.response['Error']['Code'], '404')
    def verify_s3_object_exists(self, bucket, key):
        """Assert that bucket/key exists, is not a delete marker, and is non-empty."""
        try:
            res = self.aws_s3.head_object(Bucket=bucket, Key=key)
        except Exception as e:
            self.fail("head_object(Bucket='{}', Key='{}') failed: {}".format(bucket, key, e))
        if res.get('DeleteMarker', False):
            self.fail("head_object(Bucket='{}', Key='{}') -> DeleteMarker is True".format(bucket, key))
        if res.get('ContentLength', 0) == 0:
            self.fail("head_object(Bucket='{}', Key='{}') -> ContentLength is 0".format(bucket, key))
def verify_stack(self, context, stack_arn, spec):
verified = False
if 'StackStatus' in spec:
res = self.aws_cloudformation.describe_stacks(StackName = stack_arn)
self.assertEqual(res['Stacks'][0]['StackStatus'], spec['StackStatus'], 'Stack Status {} when expected {} for stack with context {}'.format(res['Stacks'][0]['StackStatus'], spec['StackStatus'], stack_arn, context))
verified = True
if 'StackResources' in spec:
self.verify_stack_resources(context, stack_arn, spec['StackResources'])
verified = True
self.assertTrue(verified)
    def verify_stack_resources(self, context, stack_arn, expected_resources):
        """Assert that a stack's resources exactly match expected_resources.

        expected_resources maps logical resource id -> spec dict with at least
        'ResourceType'; optional keys (e.g. 'Permissions') are checked by the
        per-type handler registered in self.verify_handlers.
        """
        res = self.aws_cloudformation.describe_stack_resources(StackName = stack_arn)
        stack_resources = res['StackResources']
        resources_seen = []
        for stack_resource in stack_resources:
            expected_resource = expected_resources.get(stack_resource['LogicalResourceId'], None)
            self.assertIsNotNone(expected_resource, 'Unexpected Resource {} in Stack {}'.format(stack_resource, stack_arn))
            resources_seen.append(stack_resource['LogicalResourceId'])
            self.assertEquals(expected_resource['ResourceType'], stack_resource['ResourceType'],
                'Expected type {} on resource {} in stack {}, Found {}'.format(
                    expected_resource['ResourceType'],
                    stack_resource['LogicalResourceId'],
                    stack_arn, stack_resource['ResourceType']))
            if 'Permissions' in expected_resource:
                # Resolve '$...'-style references to concrete ARNs before the
                # per-type handler checks the permissions.
                self.expand_resource_references(expected_resource['Permissions'], stack_resources)
            handler = self.verify_handlers.get(expected_resource['ResourceType'], None)
            if handler is not None:
                handler(self, context + ' resource ' + stack_resource['LogicalResourceId'], stack_resource, expected_resource)
        # Both directions: no unexpected resources (asserted above) and none missing.
        self.assertEquals(sorted(resources_seen), sorted(expected_resources.keys()), 'Missing resources in stack {}. \nSeen: {}\nExpected: {}'.format(stack_arn, sorted(resources_seen), sorted(expected_resources.keys())))
    def expand_resource_references(self, permissions, stack_resources):
        """Replace '$'-prefixed entries in each permission's 'Resources' list
        (in place) with the ARN of the matching stack resource.

        NOTE(review): the [1:-1] slice strips both the leading '$' and the
        final character, so references presumably look like '$Name$' — confirm
        the delimiter convention before changing this.
        """
        for permission in permissions:
            permission_resources = permission['Resources']
            for permission_resource_index, permission_resource in enumerate(permission_resources):
                if permission_resource.startswith('$'):
                    permission_resource_name = permission_resource[1:-1]
                    for stack_resource in stack_resources:
                        if stack_resource['LogicalResourceId'] == permission_resource_name:
                            permission_resources[permission_resource_index] = self.make_resource_arn(
                                stack_resource['StackId'],
                                stack_resource['ResourceType'],
                                stack_resource['PhysicalResourceId'])
def verify_child_stack(self, context, stack_resource, expected_resource):
context += ' child stack ' + stack_resource['LogicalResourceId']
self.verify_stack(context, stack_resource['PhysicalResourceId'], expected_resource)
    def verify_lambda_function(self, context, stack_resource, expected_resource):
        """Verify a Lambda function's execution-role permissions, when the spec lists any."""
        if 'Permissions' in expected_resource:
            # The permissions live on the function's execution role.
            get_function_configuration_res = self.aws_lambda.get_function_configuration(FunctionName=stack_resource['PhysicalResourceId'])
            role_name = self.get_role_name_from_arn(get_function_configuration_res['Role'])
            self.verify_role_permissions(context + ' role ' + role_name, stack_resource['StackId'], role_name, expected_resource['Permissions'])
def verify_iam_role(self, context, stack_resource, expected_resource):
if 'Permissions' in expected_resource:
self.verify_role_permissions(context, stack_resource['StackId'], stack_resource['PhysicalResourceId'], expected_resource['Permissions'])
def verify_iam_managed_policy(self, context, stack_resource, expected_resource):
if 'Permissions' in expected_resource:
self.verify_managed_policy_permissions(context, stack_resource['PhysicalResourceId'], expected_resource['Permissions'])
    def verify_role_permissions(self, context, stack_arn, role_name, permissions):
        """Collect every policy document attached to an IAM role (inline plus
        the default version of each attached managed policy) and verify the
        expected permissions against them via verify_permissions().
        """
        policy_documents = []
        # get inline policy documents
        list_role_policies_res = self.aws_iam.list_role_policies(RoleName = role_name)
        # print '*** list_role_policies_res', role_name, list_role_policies_res
        for policy_name in list_role_policies_res['PolicyNames']:
            get_role_policy_res = self.aws_iam.get_role_policy(RoleName = role_name, PolicyName = policy_name)
            # print '*** get_role_policy_res', role_name, policy_name, get_role_policy_res
            policy_documents.append(json.dumps(get_role_policy_res['PolicyDocument']))
        # get attached policy documents
        list_attached_role_policies_res = self.aws_iam.list_attached_role_policies(RoleName = role_name)
        # print '*** list_attached_role_policies_res', role_name, list_attached_role_policies_res
        for attached_policy in list_attached_role_policies_res['AttachedPolicies']:
            policy_arn = attached_policy['PolicyArn']
            list_policy_versions_res = self.aws_iam.list_policy_versions(PolicyArn = policy_arn)
            # print '*** list_policy_versions_res', policy_arn, list_policy_versions_res
            for policy_version in list_policy_versions_res['Versions']:
                # Only the default version of an attached policy is in effect.
                if policy_version['IsDefaultVersion']:
                    get_policy_version_res = self.aws_iam.get_policy_version(PolicyArn = policy_arn, VersionId = policy_version['VersionId'])
                    # print '*** get_policy_version_res', policy_arn, policy_version['VersionId'], get_policy_version_res
                    policy_documents.append(json.dumps(get_policy_version_res['PolicyVersion']['Document'], indent=4))
        # verify using the accumulated policy documents
        context += ' role ' + role_name
        self.verify_permissions(context, policy_documents, permissions)
    def verify_managed_policy_permissions(self, context, policy_arn, permissions):
        """Verify expected permissions against the default version of a
        managed policy, via verify_permissions().
        """
        policy_documents = []
        # get policy document
        list_policy_versions_res = self.aws_iam.list_policy_versions(PolicyArn = policy_arn)
        # print '*** list_policy_versions_res', policy_arn, list_policy_versions_res
        for policy_version in list_policy_versions_res['Versions']:
            # Only the default version of the policy is in effect.
            if policy_version['IsDefaultVersion']:
                get_policy_version_res = self.aws_iam.get_policy_version(PolicyArn = policy_arn, VersionId = policy_version['VersionId'])
                # print '*** get_policy_version_res', policy_arn, policy_version['VersionId'], get_policy_version_res
                policy_documents.append(json.dumps(get_policy_version_res['PolicyVersion']['Document'], indent=4))
        # verify using the accumulated policy documents
        context += ' managed policy ' + policy_arn
        self.verify_permissions(context, policy_documents, permissions)
    def verify_permissions(self, context, policy_documents, permissions):
        """Check expected Allow/Deny permissions against a set of policy
        documents using the IAM policy simulator.

        Each entry in 'permissions' has exactly one of 'Allow' or 'Deny'
        (a list of action names), plus optional 'Resources' (ARNs) and
        'Description' keys.
        """
        #print '**** Policy Documents:'
        #for policy_document in policy_documents:
        #    print '\n' + json.dumps(json.loads(policy_document), indent=4)
        for permission in permissions:
            description = permission.get('Description', None)
            #if description:
            #    print '>>> Checking Permission:', description
            if 'Allow' in permission:
                self.assertNotIn('Deny', permission)
                action_names = permission['Allow']
                expect_allowed = True
            elif 'Deny' in permission:
                self.assertNotIn('Allow', permission)
                action_names = permission['Deny']
                expect_allowed = False
            else:
                self.fail('For "{}" neither Allow or Deny was specified for permission {} in context {}'.format(description, permission, context))
            resource_arns = permission.get('Resources', [])
            #print '**** action_names:', action_names
            #print '**** resource_arns:', resource_arns
            simulate_custom_policy_res = self.aws_iam.simulate_custom_policy(
                PolicyInputList = policy_documents,
                ActionNames = action_names,
                ResourceArns = resource_arns)
            #print '**** simulate_custom_policy_res', json.dumps(simulate_custom_policy_res, indent=4)
            # Locate the simulator's verdict for one (action, resource) pair;
            # fails the test if the simulator returned no matching result.
            def find_evaluation_result(action, resource_arn):
                for evaluation_result in simulate_custom_policy_res['EvaluationResults']:
                    # print '*** evaluation_result', json.dumps(evaluation_result, indent=4)
                    if evaluation_result['EvalActionName'] == action:
                        if 'ResourceSpecificResults' in evaluation_result:
                            for resource_specific_result in evaluation_result['ResourceSpecificResults']:
                                if resource_specific_result['EvalResourceName'] == resource_arn:
                                    return evaluation_result
                        else:
                            if evaluation_result['EvalResourceName'] == resource_arn:
                                return evaluation_result
                self.fail('No evaluation result found for action {} and resource {} for context {}'.format(action, resource_arn, context))
            # Build a detailed failure message; reads 'action', 'resource_arn'
            # and 'evaluation_result' from the enclosing loop below.
            def format_error_message(expected_permission):
                dumped_policy_documents = ""
                for policy_document in policy_documents:
                    dumped_policy_documents = dumped_policy_documents + '\n' + json.dumps(json.loads(policy_document), indent=4) + '\n'
                return 'For "{}" expected permission {} for action {} and resource {} in context {}.\n\nEvaluation Result:\n\n{}\n\nPolicy Documents:\n{}'.format(
                    description,
                    expected_permission,
                    action,
                    resource_arn,
                    context,
                    json.dumps(evaluation_result, indent=4),
                    dumped_policy_documents
                )
            for action in action_names:
                for resource_arn in resource_arns:
                    evaluation_result = find_evaluation_result(action, resource_arn)
                    if expect_allowed:
                        self.assertEqual(evaluation_result['EvalDecision'], 'allowed', format_error_message('allowed'))
                    else:
                        self.assertNotEqual(evaluation_result['EvalDecision'], 'allowed', format_error_message('denied'))
    def get_mapping(self, resource_group_name, resource_name):
        """Return the '<group>.<resource>' mapping from the local user settings
        file, failing the test when the mapping is missing.
        """
        user_settings_file_path = os.path.join(self.ROOT_DIR, resource_manager.constant.PROJECT_CACHE_DIRECTORY_NAME, test_constant.GAME_NAME, "pc", resource_manager.constant.PROJECT_USER_DIRECTORY_NAME, resource_manager.constant.PROJECT_AWS_DIRECTORY_NAME, resource_manager.constant.USER_SETTINGS_FILENAME)
        with open(user_settings_file_path, 'r') as user_settings_file:
            user_settings = json.load(user_settings_file)
        mappings = user_settings.get('Mappings', {})
        mapping_name = resource_group_name + '.' + resource_name
        mapping = mappings.get(mapping_name, None)
        self.assertIsNotNone(mapping, 'Missing mapping for {}'.format(mapping_name))
        return mapping
    def verify_user_mappings(self, deployment_name, logical_ids, expected_physical_resource_ids = {}):
        """Verify that the local user settings file contains the expected
        resource mappings for the given deployment.

        NOTE(review): the mutable default for expected_physical_resource_ids
        is only safe if callees never modify it — confirm.
        """
        user_settings_file_path = os.path.join(self.ROOT_DIR, resource_manager.constant.PROJECT_CACHE_DIRECTORY_NAME, test_constant.GAME_NAME, "pc", resource_manager.constant.PROJECT_USER_DIRECTORY_NAME, resource_manager.constant.PROJECT_AWS_DIRECTORY_NAME, resource_manager.constant.USER_SETTINGS_FILENAME)
        print 'Verifing mappings in {}'.format(user_settings_file_path)
        with open(user_settings_file_path, 'r') as user_settings_file:
            user_settings = json.load(user_settings_file)
        mappings = user_settings.get('Mappings', {})
        self.__verify_mappings(mappings, deployment_name, logical_ids, expected_physical_resource_ids = expected_physical_resource_ids)
def verify_release_mappings(self, deployment_name, logical_ids, expected_physical_resource_ids = {}):
release_mappings_file_name = deployment_name + | |
sIsYUV = sColorFamily == vs.YUV
sIsGRAY = sColorFamily == vs.GRAY
sIsYCOCG = sColorFamily == vs.YCOCG
if sColorFamily == vs.COMPAT:
raise ValueError(funcName + ': color family *COMPAT* is not supported!')
if not (sIsYUV or sIsYCOCG):
raise ValueError(funcName + ': only YUV or YCoCg color family is allowed!')
# Parameters
if matrices is None:
matrices = ['601', '709', '2020', '240', 'FCC', 'YCgCo']
elif not isinstance(matrices, list) and isinstance(matrices, str):
raise TypeError(funcName + ': \'matrices\' must be a (list of) str!')
if full is None:
full = sIsYCOCG
elif not isinstance(full, int):
raise TypeError(funcName + ': \'full\' must be a bool!')
if lower is None:
lower = -0.02
elif not (isinstance(lower, float) or isinstance(lower, int)):
raise TypeError(funcName + ': \'lower\' must be an int or a float!')
if upper is None:
upper = 1.02
elif not (isinstance(upper, float) or isinstance(upper, int)):
raise TypeError(funcName + ': \'upper\' must be an int or a float!')
# Process
clip = ToYUV(clip, css='444', depth=32, full=full)
props = ['RMean', 'GMean', 'BMean', 'TotalMean']
def _FrameProps(n, f):
fout = f.copy()
fout.props.__setattr__(props[3], (f.props.__getattr__(props[0]) + f.props.__getattr__(props[1]) + f.props.__getattr__(props[2])) / 3)
return fout
rgb_clips = []
for matrix in matrices:
rgb_clip = ToRGB(clip, matrix=matrix)
rgb_clip = rgb_clip.std.Expr('x {lower} < 1 x - x {upper} > x 0 ? ?'.format(lower=lower, upper=upper))
rgb_clip = PlaneAverage(rgb_clip, 0, props[0])
rgb_clip = PlaneAverage(rgb_clip, 1, props[1])
rgb_clip = PlaneAverage(rgb_clip, 2, props[2])
rgb_clip = core.std.ModifyFrame(rgb_clip, rgb_clip, selector=_FrameProps)
rgb_clip = Depth(rgb_clip, depth=8, dither='none')
rgb_clip = rgb_clip.text.FrameProps(props, alignment=7)
rgb_clip = rgb_clip.text.Text(matrix, alignment=8)
rgb_clips.append(rgb_clip)
# Output
return core.std.StackVertical(rgb_clips)
################################################################################################################################
################################################################################################################################
## Utility function: postfix2infix()
################################################################################################################################
## Convert postfix expression (used by std.Expr) to infix expression
################################################################################################################################
## Basic parameters
## expr {str}: the postfix expression to be converted
################################################################################################################################
def postfix2infix(expr):
    """Convert a postfix expression (as used by std.Expr) to an infix string.

    Basic parameters
        expr {str}: the postfix expression to be converted

    Raises TypeError for non-str input and ValueError for malformed
    expressions (missing operands or left-over operands).
    """
    funcName = 'postfix2infix'
    # The stack words 'dup' and 'swap' are listed with the 1- and 2-operand
    # operators so the arity check pops the right number of items, but they
    # are special-cased below because they are not real operators.
    op1 = ['exp', 'log', 'sqrt', 'abs', 'not', 'dup']
    op2 = ['+', '-', '*', '/', 'max', 'min', '>', '<', '=', '>=', '<=', 'and', 'or', 'xor', 'swap', 'pow']
    op3 = ['?']
    def remove_brackets(x):
        # Strip one pair of outer parentheses, but only when they enclose the
        # whole string (e.g. not for '(a) + (b)').
        if x[0] == '(' and x[len(x) - 1] == ')':
            p = 1
            for c in x[1:-1]:
                if c == '(':
                    p += 1
                elif c == ')':
                    p -= 1
                if p == 0:
                    break
            if p == 1:
                return x[1:-1]
        return x
    if not isinstance(expr, str):
        raise TypeError(funcName + ': \'expr\' must be a str!')
    expr_list = expr.split()
    stack = []
    for item in expr_list:
        if op1.count(item) > 0:
            try:
                operand1 = stack.pop()
            except IndexError:
                raise ValueError(funcName + ': Invalid expression, require operands.')
            if item == 'dup':
                stack.append(operand1)
                stack.append(operand1)
            else:
                stack.append('{}({})'.format(item, remove_brackets(operand1)))
        elif op2.count(item) > 0:
            try:
                operand2 = stack.pop()
                operand1 = stack.pop()
            except IndexError:
                raise ValueError(funcName + ': Invalid expression, require operands.')
            if item == 'swap':
                # Bug fix: 'swap' exchanges the two top stack entries; it must
                # not be rendered as a binary operator '(a swap b)'.
                stack.append(operand2)
                stack.append(operand1)
            else:
                stack.append('({} {} {})'.format(operand1, item, operand2))
        elif op3.count(item) > 0:
            try:
                operand3 = stack.pop()
                operand2 = stack.pop()
                operand1 = stack.pop()
            except IndexError:
                raise ValueError(funcName + ': Invalid expression, require operands.')
            stack.append('({} {} {} {} {})'.format(operand1, item, operand2, ':', operand3))
        else:
            # Anything unrecognized (clip names, numeric constants) is an operand.
            stack.append(item)
    if len(stack) > 1:
        raise ValueError(funcName + ': Invalid expression, require operators.')
    return remove_brackets(stack[0])
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
## Frame property functions below
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
## Frame property function: SetColorSpace()
################################################################################################################################
## Modify the color space related frame properties in the given clip.
## Detailed descriptions of these properties: http://www.vapoursynth.com/doc/apireference.html
################################################################################################################################
## Parameters
## %Any%: for the property named "_%Any%"
## - None: do nothing
## - True: do nothing
## - False: delete corresponding frame properties if exist
## - {int}: set to this value
################################################################################################################################
def SetColorSpace(clip, ChromaLocation=None, ColorRange=None, Primaries=None, Matrix=None, Transfer=None):
    """Modify the color-space related frame properties of "clip".

    For each argument %Any% (controlling frame property "_%Any%"):
        - None: do nothing
        - True: do nothing
        - False: delete the property if it exists
        - {int}: set the property to this value
    ChromaLocation is restricted to [0, 5] and ColorRange to [0, 1]; the
    remaining properties accept any int.
    """
    # Set VS core and function name
    core = vs.get_core()
    funcName = 'SetColorSpace'
    if not isinstance(clip, vs.VideoNode):
        raise TypeError(funcName + ': \"clip\" must be a clip!')
    def _apply(clip, name, value, bounds=None):
        # Shared None/bool/int handling for one "_<name>" frame property.
        # bool is tested before int because isinstance(True, int) is True.
        if value is None:
            return clip
        elif isinstance(value, bool):
            if value is False:
                return core.std.SetFrameProp(clip, prop='_' + name, delete=True)
            return clip
        elif isinstance(value, int):
            if bounds is not None and not (bounds[0] <= value <= bounds[1]):
                raise ValueError(funcName + ': valid range of \"{}\" is [{}, {}]!'.format(name, bounds[0], bounds[1]))
            return core.std.SetFrameProp(clip, prop='_' + name, intval=value)
        else:
            raise TypeError(funcName + ': \"{}\" must be an int or a bool!'.format(name))
    # Modify frame properties
    clip = _apply(clip, 'ChromaLocation', ChromaLocation, (0, 5))
    clip = _apply(clip, 'ColorRange', ColorRange, (0, 1))
    clip = _apply(clip, 'Primaries', Primaries)
    clip = _apply(clip, 'Matrix', Matrix)
    clip = _apply(clip, 'Transfer', Transfer)
    # Output
    return clip
################################################################################################################################
################################################################################################################################
## Frame property function: AssumeFrame()
################################################################################################################################
## Set all the frames in the given clip to be frame-based(progressive).
## It can be used to prevent the field order set in de-interlace filters from being overridden by the frame property '_FieldBased'.
## Also it may be useful to be applied before upscaling or anti-aliasing scripts using EEDI3/nnedi3, etc.(whose field order should be specified explicitly)
################################################################################################################################
def AssumeFrame(clip):
    """Flag every frame of the given clip as frame-based (progressive).

    Writes '_FieldBased' = 0 and removes any per-frame '_Field' property,
    which overrides the field order that de-interlace filters would
    otherwise stamp onto the frames.
    """
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('AssumeFrame: "clip" must be a clip!')
    # 0 == frame-based (progressive)
    flagged = core.std.SetFrameProp(clip, prop='_FieldBased', intval=0)
    return core.std.SetFrameProp(flagged, prop='_Field', delete=True)
################################################################################################################################
################################################################################################################################
## Frame property function: AssumeTFF()
################################################################################################################################
## Set all the frames in the given clip to be top-field-first(interlaced).
## This frame property will override the field order set in those de-interlace filters.
################################################################################################################################
def AssumeTFF(clip):
    """Flag every frame of the given clip as top-field-first interlaced.

    Writes '_FieldBased' = 2 (TFF) and removes any per-frame '_Field'
    property; this frame property overrides the field order configured in
    de-interlace filters.
    """
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('AssumeTFF: "clip" must be a clip!')
    # 2 == top field first
    flagged = core.std.SetFrameProp(clip, prop='_FieldBased', intval=2)
    return core.std.SetFrameProp(flagged, prop='_Field', delete=True)
################################################################################################################################
################################################################################################################################
## Frame property function: AssumeBFF()
################################################################################################################################
## Set all the frames in the given clip to be bottom-field-first(interlaced).
## This frame property will override the field order set in those de-interlace filters.
################################################################################################################################
def AssumeBFF(clip):
    """Flag every frame of the given clip as bottom-field-first interlaced.

    Writes '_FieldBased' = 1 (BFF) and removes any per-frame '_Field'
    property; this frame property overrides the field order configured in
    de-interlace filters.
    """
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('AssumeBFF: "clip" must be a clip!')
    # 1 == bottom field first
    flagged = core.std.SetFrameProp(clip, prop='_FieldBased', intval=1)
    return core.std.SetFrameProp(flagged, prop='_Field', delete=True)
################################################################################################################################
################################################################################################################################
## Frame property function: AssumeField()
################################################################################################################################
## Set all the frames in the given clip to be field-based (derived from an interlaced frame).
################################################################################################################################
## Parameters
## top {bool}:
## - True: top-field-based
## - False: bottom-field-based
################################################################################################################################
def AssumeField(clip, top):
    """Flag every frame of the given clip as field-based.

    Parameters:
        clip: the clip to modify.
        top (bool): True for top-field-based, False for bottom-field-based.

    Deletes '_FieldBased' and writes '_Field' (1 for top, 0 for bottom).
    """
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('AssumeField: "clip" must be a clip!')
    if not isinstance(top, int):
        raise TypeError('AssumeField: "top" must be a bool!')
    field_value = 1 if top else 0
    stripped = core.std.SetFrameProp(clip, prop='_FieldBased', delete=True)
    return core.std.SetFrameProp(stripped, prop='_Field', intval=field_value)
################################################################################################################################
################################################################################################################################
## Frame property function: AssumeCombed()
################################################################################################################################
## Set all the frames in the given clip to be combed or not.
################################################################################################################################
## Parameters
## combed {bool}:
## - None: delete property '_Combed' if exist
## - True: set property '_Combed' to 1
## - False: set property '_Combed' to 0
## default: True
################################################################################################################################
def AssumeCombed(clip, combed=True):
    """Mark all frames of the given clip as combed (or not).

    Parameters:
        clip: the clip to modify.
        combed: None  -> delete the '_Combed' property if present;
                True  -> set '_Combed' to 1;
                False -> set '_Combed' to 0.
                Defaults to True.
    """
    core = vs.get_core()
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('AssumeCombed: "clip" must be a clip!')
    if combed is None:
        return core.std.SetFrameProp(clip, prop='_Combed', delete=True)
    if not isinstance(combed, int):
        raise TypeError('AssumeCombed: "combed" must be a bool!')
    return core.std.SetFrameProp(clip, prop='_Combed', intval=combed)
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
## Helper functions below
################################################################################################################################
################################################################################################################################
################################################################################################################################
################################################################################################################################
## Helper function: CheckVersion()
################################################################################################################################
## Check if the version | |
"""Class for cross-validation over distributions of hyperparameters
-- <NAME> and <NAME>
"""
import logging
from tune_sklearn.utils import check_is_pipeline
from tune_sklearn.tune_basesearch import TuneBaseSearchCV
from tune_sklearn._trainable import _Trainable
from tune_sklearn._trainable import _PipelineTrainable
from sklearn.base import clone
from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from tune_sklearn.list_searcher import RandomListSearcher
from tune_sklearn.utils import check_error_warm_start
import numpy as np
import warnings
import os
logger = logging.getLogger(__name__)
def _check_distribution(dist, search_optimization):
if search_optimization == "random":
if not (isinstance(dist, list) or hasattr(dist, "rvs")):
raise ValueError("distribution must be a list or scipy "
"distribution when using randomized search")
elif not isinstance(dist, tuple) and not isinstance(dist, list):
if search_optimization == "bayesian":
import skopt
if not isinstance(dist, skopt.space.Dimension):
raise ValueError("distribution must be a tuple, list, or "
"`skopt.space.Dimension` instance when using "
"bayesian search")
elif search_optimization == "hyperopt":
import hyperopt.pyll
if not isinstance(dist, hyperopt.pyll.base.Apply):
raise ValueError(
"distribution must be a tuple, list, or "
"`hyperopt.pyll.base.Apply` instance when using "
"hyperopt search")
elif search_optimization == "optuna":
import optuna.distributions
if not isinstance(dist, optuna.distributions.BaseDistribution):
raise ValueError("distribution must be a tuple, list, or "
"`optuna.distributions.BaseDistribution`"
"instance when using optuna search")
elif search_optimization == "bohb":
import ConfigSpace.hyperparameters
if not isinstance(dist,
ConfigSpace.hyperparameters.Hyperparameter):
raise ValueError(
"distribution must be a tuple, list, or "
"`ConfigSpace.hyperparameters.Hyperparameter` "
"instance when using bohb search")
class TuneSearchCV(TuneBaseSearchCV):
"""Generic, non-grid search on hyper parameters.
Randomized search is invoked with ``search_optimization`` set to
``"random"`` and behaves like scikit-learn's ``RandomizedSearchCV``.
Bayesian search can be invoked with several values of
``search_optimization``.
- ``"bayesian"``, using https://scikit-optimize.github.io/stable/
- ``"bohb"``, using HpBandSter - https://github.com/automl/HpBandSter
Tree-Parzen Estimators search is invoked with ``search_optimization``
set to ``"hyperopt"``, using HyperOpt - http://hyperopt.github.io/hyperopt
All types of search aside from Randomized search require parent
libraries to be installed.
TuneSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_trials.
Args:
estimator (`estimator`): This is assumed to implement the
scikit-learn estimator interface. Either estimator needs to
provide a ``score`` function, or ``scoring`` must be passed.
param_distributions (`dict` or `list` or `ConfigurationSpace`): Serves
as the ``param_distributions`` parameter in scikit-learn's
``RandomizedSearchCV`` or as the ``search_space`` parameter in
``BayesSearchCV``.
For randomized search: dictionary with parameters names (string)
as keys and distributions or lists of parameter settings to try
for randomized search.
Distributions must provide a rvs method for sampling (such as
those from scipy.stats.distributions).
If a list is given, it is sampled uniformly. If a list of dicts is
given, first a dict is sampled uniformly, and then a parameter is
sampled using that dict as above.
For other types of search: dictionary with parameter names (string)
as keys. Values can be
- a (lower_bound, upper_bound) tuple (for Real or Integer params)
- a (lower_bound, upper_bound, "prior") tuple (for Real params)
- as a list of categories (for Categorical dimensions)
``"bayesian"`` (scikit-optimize) also accepts
- skopt.space.Dimension instance (Real, Integer or Categorical).
``"hyperopt"`` (HyperOpt) also accepts
- an instance of a hyperopt.pyll.base.Apply object.
``"bohb"`` (HpBandSter) also accepts
- ConfigSpace.hyperparameters.Hyperparameter instance.
``"optuna"`` (Optuna) also accepts
- an instance of a optuna.distributions.BaseDistribution object.
For ``"bohb"`` (HpBandSter) it is also possible to pass a
`ConfigSpace.ConfigurationSpace` object instead of dict or a list.
https://scikit-optimize.github.io/stable/modules/
classes.html#module-skopt.space.space
early_stopping (bool, str or :class:`TrialScheduler`, optional): Option
to stop fitting to a hyperparameter configuration if it performs
poorly. Possible inputs are:
- If True, defaults to ASHAScheduler.
- A string corresponding to the name of a Tune Trial Scheduler
(i.e., "ASHAScheduler"). To specify parameters of the scheduler,
pass in a scheduler object instead of a string.
- Scheduler for executing fit with early stopping. Only a subset
of schedulers are currently supported. The scheduler will only be
used if the estimator supports partial fitting
- If None or False, early stopping will not be used.
Unless a ``HyperBandForBOHB`` object is passed,
this parameter is ignored for ``"bohb"``, as it requires
``HyperBandForBOHB``.
n_trials (int): Number of parameter settings that are sampled.
n_trials trades off runtime vs quality of the solution.
Defaults to 10.
scoring (str, callable, list/tuple, dict, or None): A single
string or a callable to evaluate the predictions on the test set.
See https://scikit-learn.org/stable/modules/model_evaluation.html
#scoring-parameter for all options.
For evaluating multiple metrics, either give a list/tuple of
(unique) strings or a dict with names as keys and callables as
values.
If None, the estimator's score method is used. Defaults to None.
n_jobs (int): Number of jobs to run in parallel. None or -1 means
using all processors. Defaults to None. If set to 1, jobs
will be run using Ray's 'local mode'. This can
lead to significant speedups if the model takes < 10 seconds
to fit due to removing inter-process communication overheads.
sk_n_jobs (int): Number of jobs to run in parallel for cross validating
each hyperparameter set; the ``n_jobs`` parameter for
``cross_validate`` call to sklearn when early stopping isn't used.
refit (bool, str, or `callable`): Refit an estimator using the
best found parameters on the whole dataset.
For multiple metric evaluation, this needs to be a string denoting
the scorer that would be used to find the best parameters for
refitting the estimator at the end.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes
``best_index_``, ``best_score_`` and ``best_params_`` will only be
available if ``refit`` is set and all of them will be determined
w.r.t this specific scorer. If refit not needed, set to False.
See ``scoring`` parameter to know more about multiple metric
evaluation. Defaults to True.
cv (int, `cross-validation generator` or `iterable`): Determines
the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y``
is either binary or multiclass, :class:`StratifiedKFold` is used.
In all other cases, :class:`KFold` is used. Defaults to None.
verbose (int): Controls the verbosity: 0 = silent, 1 = only status
updates, 2 = status and trial results. Defaults to 0.
random_state (int or `RandomState`): Pseudo random number generator
state used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions.
If int, random_state is the seed used by the random number
generator;
If RandomState instance, random_state is the random number
generator;
If None, the random number generator is the RandomState instance
used by np.random. Defaults to None.
Ignored when doing Bayesian search.
error_score ('raise' or int or float): Value to assign to the score if
an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised. This parameter does not
affect the refit step, which will always raise the error.
Defaults to np.nan.
return_train_score (bool): If ``False``, the ``cv_results_``
attribute will not include training scores. Defaults to False.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be
computationally expensive and is not strictly required to select
the parameters that yield the best generalization performance.
local_dir (str): A string that defines where checkpoints and logs will
be stored. Defaults to "~/ray_results"
max_iters (int): Indicates the maximum number of epochs to run for each
hyperparameter configuration sampled (specified by ``n_trials``).
This parameter is used for early stopping. Defaults to 1.
Depending on the classifier type provided, a resource parameter
(`resource_param = max_iter or n_estimators`) will be detected.
The value of `resource_param` will | |
- bool, optional
If True, will print out more diagnostic messages
dtype - string, optional
Data type for reading variables. Default is 32 bit float.
kelvin - bool (default True)
Whether to load data in Kelvin. (uses eV otherwise)
Examples
--------
>>> a = cross_sect(['h-h-data2.txt','h-h2-data.txt'], fdir="/data/cb24bih")
"""
def __init__(self, cross_tab=None, fdir=os.curdir, dtype='f4', verbose=None, kelvin=True, obj=None):
'''
Loads cross section tables and calculates collision frequencies and
ambipolar diffusion.
parameters:
cross_tab: None or list of strings
None -> use default cross tab list of strings.
else -> treat each string as the name of a cross tab file.
fdir: str (default '.')
directory of files (prepend to each filename in cross_tab).
dtype: default 'f4'
sets self.dtype. aside from that, internally does NOTHING.
verbose: None (default) or bool.
controls verbosity. presently, internally does NOTHING.
if None, use obj.verbose if possible, else use False (default)
kelvin - bool (default True)
Whether to load data in Kelvin. (uses eV otherwise)
obj: None (default) or an object
None -> does nothing; ignore this parameter.
else -> improve time-efficiency by saving data from cross_tab files
into memory of obj (save in obj._memory_read_cross_txt).
'''
self.fdir = fdir
self.dtype = dtype
if verbose is None:
verbose = False if obj is None else getattr(obj, 'verbose', False)
self.verbose = verbose
self.kelvin = kelvin
self.units = {True: 'K', False: 'eV'}[self.kelvin]
# save pointer to obj. Use weakref to help ensure we don't create a circular reference.
self.obj = (lambda: None) if (obj is None) else weakref.ref(obj) # self.obj() returns obj.
# read table file and calculate parameters
if cross_tab is None:
cross_tab = ['h-h-data2.txt', 'h-h2-data.txt', 'he-he.txt',
'e-h.txt', 'e-he.txt', 'h2_molecule_bc.txt',
'h2_molecule_pj.txt', 'p-h-elast.txt', 'p-he.txt',
'proton-h2-data.txt']
self._cross_tab_strs = cross_tab
self.cross_tab_list = {}
for i, cross_txt in enumerate(cross_tab):
self.cross_tab_list[i] = os.path.join(fdir, cross_txt)
# load table(s)
self.load_cross_tables(firstime=True)
def load_cross_tables(self,firstime=False):
'''
Collects the information in the cross table files.
'''
self.cross_tab = dict()
for itab in range(len(self.cross_tab_list)):
self.cross_tab[itab] = read_cross_txt(self.cross_tab_list[itab],firstime=firstime,
obj=self.obj(), kelvin=self.kelvin)
def tab_interp(self, tg, itab=0, out='el', order=1):
''' Interpolates the cross section tables in the simulated domain.
IN:
tg : Temperature [K]
order: interpolation order (1: linear, 3: cubic)
OUT:
'se' : Spin exchange cross section [a.u.]
'el' : Integral Elastic cross section [a.u.]
'mt' : momentum transfer cross section [a.u.]
'vi' : viscosity cross section [a.u.]
'''
if out in ['se el vi mt'.split()] and not self.load_cross_tables:
raise ValueError("(EEE) tab_interp: EOS table not loaded!")
finterp = interpolate.interp1d(self.cross_tab[itab]['tg'],
self.cross_tab[itab][out])
tgreg = np.array(tg, copy=True)
max_temp = np.max(self.cross_tab[itab]['tg'])
tgreg[tg > max_temp] = max_temp
min_temp = np.min(self.cross_tab[itab]['tg'])
tgreg[tg < min_temp] = min_temp
return finterp(tgreg)
def __repr__(self):
return '{} == {}'.format(object.__repr__(self), str(self))
def __str__(self):
return "Cross_sect(cross_tab={}, fdir='{}')".format(self._cross_tab_strs, self.fdir)
def cross_sect_for_obj(obj=None):
    '''Return a factory that builds Cross_sect instances bound to *obj*.

    obj: None (default) or an object
        None -> the returned factory behaves like plain Cross_sect.
        else -> the factory passes obj through to Cross_sect, which
        improves time-efficiency by caching cross_tab file data on obj
        (in obj._memory_read_cross_txt). The factory also defaults fdir
        to obj.fdir unless fdir is given explicitly.
    '''
    @functools.wraps(Cross_sect)
    def _make_cross_sect(cross_tab=None, fdir=None, *args, **kwargs):
        # Default the directory from obj when the caller doesn't supply one.
        effective_fdir = getattr(obj, 'fdir', '.') if fdir is None else fdir
        return Cross_sect(cross_tab, effective_fdir, *args, **kwargs, obj=obj)
    return _make_cross_sect
###########
# TOOLS #
###########
def bifrost2d_to_rh15d(snaps, outfile, file_root, meshfile, fdir, writeB=False,
                       sx=slice(None), sz=slice(None), desc=None):
    """
    Reads a Bifrost 2D atmosphere and writes it into RH 1.5D format,
    with the time dimension written in the y dimension (to maximise
    parallelism).
    Parameters
    ----------
    snaps : list or 1D array
        Numbers of snapshots to write.
    outfile: str
        Name of RH 1.5D atmosphere file to write.
    file_root: str
        Basename for bifrost files.
    meshfile : str
        Filename of mesh file (including full path).
    fdir : str
        Directory containing the bifrost files.
    writeB : bool, optional
        If True, will also write magnetic field. Default is False.
    sx, sz : slice object, optional
        Slice objects for x and z dimensions, when not all points
        are needed. E.g. use slice(None) for all points, slice(0, 100, 2)
        for every second point up to 100.
    desc : str
        Description.
    """
    from . import rh15d
    # Load the first snapshot to obtain the grid and simulation parameters.
    data = BifrostData(file_root, snap=snaps[0], meshfile=meshfile, fdir=fdir,
                       ghost_analyse=False)
    nz = len(data.z[sz])
    # NOTE(review): nx takes the larger of the sliced x and y axes --
    # presumably the 2D plane may lie along either axis; confirm.
    nx = max(len(data.x[sx]), len(data.y[sx]))
    ny = len(snaps)  # time dimension is written along y
    # Output buffers, shaped (x, time-as-y, z).
    tgas = np.empty((nx, ny, nz), dtype='f')
    vz = np.empty_like(tgas)
    if writeB:
        Bz = np.empty_like(tgas)
        Bx = np.empty_like(tgas)
        By = np.empty_like(tgas)
    else:
        Bz = None
        Bx = None
        By = None
    ne = np.empty_like(tgas, dtype='d')  # electron density in double precision
    if data.hion:
        # With non-equilibrium hydrogen ionisation, store multiple H
        # populations (6 here) -- presumably one per level; confirm.
        nH = np.empty((6, ) + tgas.shape, dtype='f')
    else:
        nH = np.empty((1, ) + tgas.shape, dtype='f')
    # unit conversion to SI
    ul = data.params['u_l'] / 1.e2  # length unit: cm to metres
    ur = data.params['u_r']  # to g/cm^3 (for ne_rt_table)  # NOTE(review): unused here
    ut = data.params['u_t']  # to seconds
    uv = ul / ut  # velocity unit, m/s
    ub = data.params['u_b'] * 1e-4  # magnetic field: G to tesla (original comment was garbled "tgasesl")
    ue = data.params['u_ee']  # to erg/g  # NOTE(review): unused here
    if not desc:
        desc = 'BIFROST snapshot from 2D sequence %s, sx=%s sy=1 sz=%s.' % \
            (file_root, repr(sx), repr(sz))
        if data.hion:
            desc = 'hion ' + desc
    x = data.x[sx] * ul
    y = snaps  # snapshot numbers double as the y coordinate (time axis)
    # z is sign-flipped and scaled to metres -- presumably RH expects
    # height increasing upwards while Bifrost z increases downwards; confirm.
    z = data.z[sz] * (-ul)
    rdt = data.r.dtype
    # cstagger.init_stagger(data.nz, data.dx, data.dy, data.z.astype(rdt),
    #                       data.zdn.astype(rdt), data.dzidzup.astype(rdt),
    #                       data.dzidzdn.astype(rdt))
    for i, s in enumerate(snaps):
        data.set_snap(s)
        tgas[:, i] = np.squeeze(data.tg)[sx, sz]
        rho = np.squeeze(data.r)[sx, sz]
        # De-stagger pz to cell centres, divide by density to get velocity,
        # convert to m/s; sign flipped to match the flipped z axis.
        vz[:, i] = np.squeeze(do_cstagger(data.pz,'zup',obj=data))[sx, sz] / rho * (-uv)
        if writeB:
            # NOTE(review): By and Bz are negated (axis-flip convention),
            # Bx is not -- confirm against the RH 1.5D coordinate system.
            Bx[:, i] = np.squeeze(data.bx)[sx, sz] * ub
            By[:, i] = np.squeeze(-data.by)[sx, sz] * ub
            Bz[:, i] = np.squeeze(-data.bz)[sx, sz] * ub
        ne[:, i] = np.squeeze(data.get_electron_density(sx=sx, sz=sz)).to_value('1/m3')
        nH[:, :, i] = np.squeeze(data.get_hydrogen_pops(sx=sx, sz=sz)).to_value('1/m3')
    # Write all collected arrays as one RH 1.5D atmosphere file.
    rh15d.make_xarray_atmos(outfile, tgas, vz, z, nH=nH, ne=ne, x=x, y=y,
                            append=False, Bx=Bx, By=By, Bz=Bz, desc=desc,
                            snap=snaps[0])
@file_memory.remember_and_recall('_memory_read_idl_ascii')
def read_idl_ascii(filename, firstime=False):
    ''' Reads an IDL-formatted (command style) ascii file into a dict.

    Lines have the form ``key = value``; ``;`` starts a comment.
    Values are decoded as bools ('.true.'/'.false.'), then as python
    literals, then as ints; anything else is kept as a raw string.
    If obj is not None, remember the result and restore it if ever
    reading the same exact file again.
    '''
    params = {}
    with open(filename) as fp:
        for li, raw in enumerate(fp):
            # Strip any trailing comment, then split on the first '='.
            content, _, _ = raw.partition(';')
            key, _, value = content.partition('=')
            key = key.strip().lower()
            value = value.strip()
            if not key:
                continue  # blank or comment-only line
            if not value:
                if firstime:
                    print('(WWW) read_params: line %i is invalid, skipping' % li)
                continue
            # --- decode the value --- #
            if value.lower() in ('.false.', '.true.'):
                # IDL-style booleans
                params[key] = (value.lower() == '.true.')
                continue
            try:
                # Safely evaluate literals (numbers, strings, tuples, ...).
                params[key] = ast.literal_eval(value)
            except Exception:
                # Might still be an int with leading zeros.
                try:
                    params[key] = int(value)
                except ValueError:
                    params[key] = value  # keep as plain string
    return params
@file_memory.remember_and_recall('_memory_read_cross_txt', kw_mem=['kelvin'])
def read_cross_txt(filename,firstime=False, kelvin=True):
''' Reads IDL-formatted (command style) ascii file into dictionary.
tg will be converted to Kelvin, unless kelvin==False.
'''
li = 0
params = {}
count = 0
# go through the file, add stuff to dictionary
with open(filename) as fp:
for line in fp:
# ignore empty lines and comments
line = line.strip()
if len(line) < 1:
li += 1
continue
if line[0] == ';':
li += 1
continue
line = line.split(';')[0].split()
if (len(line) == 1):
params['crossunits'] = float(line[0].strip())
li += 1
continue
elif not('crossunits' in params.keys()):
print('(WWW) read_cross: line %i is invalid, missing crossunits, file %s' % (li,filename))
if (len(line) < 2):
if (firstime):
print('(WWW) read_cross: line %i is invalid, skipping, file %s' % (li,filename))
li += 1
continue
# force lowercase because IDL is case-insensitive
temp = line[0].strip()
cross = line[2].strip()
# instead of the insecure 'exec', find out the datatypes
if ((temp.upper().find('E') >= 0) or (temp.find('.') >= 0)):
# float type
temp = float(temp)
else:
# int type
try:
temp = int(temp)
except Exception:
if (firstime):
print('(WWW) read_cross: could not find datatype in '
'line %i, skipping' % li)
li += 1
continue
if not('tg' in params.keys()):
params['tg'] = temp
else:
params['tg'] = np.append(params['tg'], temp)
if ((cross.upper().find('E') >= 0) or (cross.find('.') >= 0)):
# float type
cross = float(cross)
else:
# int | |
+
(tweet_gap.gap_22 * user_surfacing_tweet_vectors.gap_22) +
(tweet_gap.gap_23 * user_surfacing_tweet_vectors.gap_23) +
(tweet_gap.gap_24 * user_surfacing_tweet_vectors.gap_24) +
(tweet_gap.gap_25 * user_surfacing_tweet_vectors.gap_25) +
(tweet_gap.gap_26 * user_surfacing_tweet_vectors.gap_26) +
(tweet_gap.gap_27 * user_surfacing_tweet_vectors.gap_27) +
(tweet_gap.gap_28 * user_surfacing_tweet_vectors.gap_28) +
(tweet_gap.gap_29 * user_surfacing_tweet_vectors.gap_29) +
(tweet_gap.gap_30 * user_surfacing_tweet_vectors.gap_30) +
(tweet_gap.gap_31 * user_surfacing_tweet_vectors.gap_31) +
(tweet_gap.gap_32 * user_surfacing_tweet_vectors.gap_32) +
(tweet_gap.gap_33 * user_surfacing_tweet_vectors.gap_33) +
(tweet_gap.gap_34 * user_surfacing_tweet_vectors.gap_34) +
(tweet_gap.gap_35 * user_surfacing_tweet_vectors.gap_35) +
(tweet_gap.gap_36 * user_surfacing_tweet_vectors.gap_36) +
(tweet_gap.gap_37 * user_surfacing_tweet_vectors.gap_37) +
(tweet_gap.gap_38 * user_surfacing_tweet_vectors.gap_38) +
(tweet_gap.gap_39 * user_surfacing_tweet_vectors.gap_39) +
(tweet_gap.gap_40 * user_surfacing_tweet_vectors.gap_40) +
(tweet_gap.gap_41 * user_surfacing_tweet_vectors.gap_41) +
(tweet_gap.gap_42 * user_surfacing_tweet_vectors.gap_42) +
(tweet_gap.gap_43 * user_surfacing_tweet_vectors.gap_43) +
(tweet_gap.gap_44 * user_surfacing_tweet_vectors.gap_44) +
(tweet_gap.gap_45 * user_surfacing_tweet_vectors.gap_45) +
(tweet_gap.gap_46 * user_surfacing_tweet_vectors.gap_46) +
(tweet_gap.gap_47 * user_surfacing_tweet_vectors.gap_47) +
(tweet_gap.gap_48 * user_surfacing_tweet_vectors.gap_48) +
(tweet_gap.gap_49 * user_surfacing_tweet_vectors.gap_49) +
(tweet_gap.gap_50 * user_surfacing_tweet_vectors.gap_50) +
(tweet_gap.gap_51 * user_surfacing_tweet_vectors.gap_51) +
(tweet_gap.gap_52 * user_surfacing_tweet_vectors.gap_52) +
(tweet_gap.gap_53 * user_surfacing_tweet_vectors.gap_53) +
(tweet_gap.gap_54 * user_surfacing_tweet_vectors.gap_54) +
(tweet_gap.gap_55 * user_surfacing_tweet_vectors.gap_55) +
(tweet_gap.gap_56 * user_surfacing_tweet_vectors.gap_56) +
(tweet_gap.gap_57 * user_surfacing_tweet_vectors.gap_57) +
(tweet_gap.gap_58 * user_surfacing_tweet_vectors.gap_58) +
(tweet_gap.gap_59 * user_surfacing_tweet_vectors.gap_59) +
(tweet_gap.gap_60 * user_surfacing_tweet_vectors.gap_60) +
(tweet_gap.gap_61 * user_surfacing_tweet_vectors.gap_61) +
(tweet_gap.gap_62 * user_surfacing_tweet_vectors.gap_62) +
(tweet_gap.gap_63 * user_surfacing_tweet_vectors.gap_63) +
(tweet_gap.gap_64 * user_surfacing_tweet_vectors.gap_64) +
(tweet_gap.gap_65 * user_surfacing_tweet_vectors.gap_65) +
(tweet_gap.gap_66 * user_surfacing_tweet_vectors.gap_66) +
(tweet_gap.gap_67 * user_surfacing_tweet_vectors.gap_67) +
(tweet_gap.gap_68 * user_surfacing_tweet_vectors.gap_68) +
(tweet_gap.gap_69 * user_surfacing_tweet_vectors.gap_69) +
(tweet_gap.gap_70 * user_surfacing_tweet_vectors.gap_70) +
(tweet_gap.gap_71 * user_surfacing_tweet_vectors.gap_71) +
(tweet_gap.gap_72 * user_surfacing_tweet_vectors.gap_72) +
(tweet_gap.gap_73 * user_surfacing_tweet_vectors.gap_73) +
(tweet_gap.gap_74 * user_surfacing_tweet_vectors.gap_74) +
(tweet_gap.gap_75 * user_surfacing_tweet_vectors.gap_75) +
(tweet_gap.gap_76 * user_surfacing_tweet_vectors.gap_76) +
(tweet_gap.gap_77 * user_surfacing_tweet_vectors.gap_77) +
(tweet_gap.gap_78 * user_surfacing_tweet_vectors.gap_78) +
(tweet_gap.gap_79 * user_surfacing_tweet_vectors.gap_79) +
(tweet_gap.gap_80 * user_surfacing_tweet_vectors.gap_80) +
(tweet_gap.gap_81 * user_surfacing_tweet_vectors.gap_81) +
(tweet_gap.gap_82 * user_surfacing_tweet_vectors.gap_82) +
(tweet_gap.gap_83 * user_surfacing_tweet_vectors.gap_83) +
(tweet_gap.gap_84 * user_surfacing_tweet_vectors.gap_84) +
(tweet_gap.gap_85 * user_surfacing_tweet_vectors.gap_85) +
(tweet_gap.gap_86 * user_surfacing_tweet_vectors.gap_86) +
(tweet_gap.gap_87 * user_surfacing_tweet_vectors.gap_87) +
(tweet_gap.gap_88 * user_surfacing_tweet_vectors.gap_88) +
(tweet_gap.gap_89 * user_surfacing_tweet_vectors.gap_89) +
(tweet_gap.gap_90 * user_surfacing_tweet_vectors.gap_90) +
(tweet_gap.gap_91 * user_surfacing_tweet_vectors.gap_91) +
(tweet_gap.gap_92 * user_surfacing_tweet_vectors.gap_92) +
(tweet_gap.gap_93 * user_surfacing_tweet_vectors.gap_93) +
(tweet_gap.gap_94 * user_surfacing_tweet_vectors.gap_94) +
(tweet_gap.gap_95 * user_surfacing_tweet_vectors.gap_95) +
(tweet_gap.gap_96 * user_surfacing_tweet_vectors.gap_96) +
(tweet_gap.gap_97 * user_surfacing_tweet_vectors.gap_97) +
(tweet_gap.gap_98 * user_surfacing_tweet_vectors.gap_98) +
(tweet_gap.gap_99 * user_surfacing_tweet_vectors.gap_99) +
(tweet_gap.gap_100 * user_surfacing_tweet_vectors.gap_100) +
(tweet_gap.gap_101 * user_surfacing_tweet_vectors.gap_101) +
(tweet_gap.gap_102 * user_surfacing_tweet_vectors.gap_102) +
(tweet_gap.gap_103 * user_surfacing_tweet_vectors.gap_103) +
(tweet_gap.gap_104 * user_surfacing_tweet_vectors.gap_104) +
(tweet_gap.gap_105 * user_surfacing_tweet_vectors.gap_105) +
(tweet_gap.gap_106 * user_surfacing_tweet_vectors.gap_106) +
(tweet_gap.gap_107 * user_surfacing_tweet_vectors.gap_107) +
(tweet_gap.gap_108 * user_surfacing_tweet_vectors.gap_108) +
(tweet_gap.gap_109 * user_surfacing_tweet_vectors.gap_109) +
(tweet_gap.gap_110 * user_surfacing_tweet_vectors.gap_110) +
(tweet_gap.gap_111 * user_surfacing_tweet_vectors.gap_111) +
(tweet_gap.gap_112 * user_surfacing_tweet_vectors.gap_112) +
(tweet_gap.gap_113 * user_surfacing_tweet_vectors.gap_113) +
(tweet_gap.gap_114 * user_surfacing_tweet_vectors.gap_114) +
(tweet_gap.gap_115 * user_surfacing_tweet_vectors.gap_115) +
(tweet_gap.gap_116 * user_surfacing_tweet_vectors.gap_116) +
(tweet_gap.gap_117 * user_surfacing_tweet_vectors.gap_117) +
(tweet_gap.gap_118 * user_surfacing_tweet_vectors.gap_118) +
(tweet_gap.gap_119 * user_surfacing_tweet_vectors.gap_119) +
(tweet_gap.gap_120 * user_surfacing_tweet_vectors.gap_120) +
(tweet_gap.gap_121 * user_surfacing_tweet_vectors.gap_121) +
(tweet_gap.gap_122 * user_surfacing_tweet_vectors.gap_122) +
(tweet_gap.gap_123 * user_surfacing_tweet_vectors.gap_123) +
(tweet_gap.gap_124 * user_surfacing_tweet_vectors.gap_124) +
(tweet_gap.gap_125 * user_surfacing_tweet_vectors.gap_125) +
(tweet_gap.gap_126 * user_surfacing_tweet_vectors.gap_126) +
(tweet_gap.gap_127 * user_surfacing_tweet_vectors.gap_127) +
(tweet_gap.gap_128 * user_surfacing_tweet_vectors.gap_128) +
(tweet_gap.gap_129 * user_surfacing_tweet_vectors.gap_129) +
(tweet_gap.gap_130 * user_surfacing_tweet_vectors.gap_130) +
(tweet_gap.gap_131 * user_surfacing_tweet_vectors.gap_131) +
(tweet_gap.gap_132 * user_surfacing_tweet_vectors.gap_132) +
(tweet_gap.gap_133 * user_surfacing_tweet_vectors.gap_133) +
(tweet_gap.gap_134 * user_surfacing_tweet_vectors.gap_134) +
(tweet_gap.gap_135 * user_surfacing_tweet_vectors.gap_135) +
(tweet_gap.gap_136 * user_surfacing_tweet_vectors.gap_136) +
(tweet_gap.gap_137 * user_surfacing_tweet_vectors.gap_137) +
(tweet_gap.gap_138 * user_surfacing_tweet_vectors.gap_138) +
(tweet_gap.gap_139 * user_surfacing_tweet_vectors.gap_139) +
(tweet_gap.gap_140 * user_surfacing_tweet_vectors.gap_140) +
(tweet_gap.gap_141 * user_surfacing_tweet_vectors.gap_141) +
(tweet_gap.gap_142 * user_surfacing_tweet_vectors.gap_142) +
(tweet_gap.gap_143 * user_surfacing_tweet_vectors.gap_143) +
(tweet_gap.gap_144 * user_surfacing_tweet_vectors.gap_144) +
(tweet_gap.gap_145 * user_surfacing_tweet_vectors.gap_145) +
(tweet_gap.gap_146 * user_surfacing_tweet_vectors.gap_146) +
(tweet_gap.gap_147 * user_surfacing_tweet_vectors.gap_147) +
(tweet_gap.gap_148 * user_surfacing_tweet_vectors.gap_148) +
(tweet_gap.gap_149 * user_surfacing_tweet_vectors.gap_149) +
(tweet_gap.gap_150 * user_surfacing_tweet_vectors.gap_150) +
(tweet_gap.gap_151 * user_surfacing_tweet_vectors.gap_151) +
(tweet_gap.gap_152 * user_surfacing_tweet_vectors.gap_152) +
(tweet_gap.gap_153 * user_surfacing_tweet_vectors.gap_153) +
(tweet_gap.gap_154 * user_surfacing_tweet_vectors.gap_154) +
(tweet_gap.gap_155 * user_surfacing_tweet_vectors.gap_155) +
(tweet_gap.gap_156 * user_surfacing_tweet_vectors.gap_156) +
(tweet_gap.gap_157 * user_surfacing_tweet_vectors.gap_157) +
(tweet_gap.gap_158 * user_surfacing_tweet_vectors.gap_158) +
(tweet_gap.gap_159 * user_surfacing_tweet_vectors.gap_159) +
(tweet_gap.gap_160 * user_surfacing_tweet_vectors.gap_160) +
(tweet_gap.gap_161 * user_surfacing_tweet_vectors.gap_161) +
(tweet_gap.gap_162 * user_surfacing_tweet_vectors.gap_162) +
(tweet_gap.gap_163 * user_surfacing_tweet_vectors.gap_163) +
(tweet_gap.gap_164 * user_surfacing_tweet_vectors.gap_164) +
(tweet_gap.gap_165 * user_surfacing_tweet_vectors.gap_165) +
(tweet_gap.gap_166 * user_surfacing_tweet_vectors.gap_166) +
(tweet_gap.gap_167 * user_surfacing_tweet_vectors.gap_167) +
(tweet_gap.gap_168 * user_surfacing_tweet_vectors.gap_168) +
(tweet_gap.gap_169 * user_surfacing_tweet_vectors.gap_169) +
(tweet_gap.gap_170 * user_surfacing_tweet_vectors.gap_170) +
(tweet_gap.gap_171 * user_surfacing_tweet_vectors.gap_171) +
(tweet_gap.gap_172 * user_surfacing_tweet_vectors.gap_172) +
(tweet_gap.gap_173 * user_surfacing_tweet_vectors.gap_173) +
(tweet_gap.gap_174 * user_surfacing_tweet_vectors.gap_174) +
(tweet_gap.gap_175 * user_surfacing_tweet_vectors.gap_175) +
(tweet_gap.gap_176 * user_surfacing_tweet_vectors.gap_176) +
(tweet_gap.gap_177 * user_surfacing_tweet_vectors.gap_177) +
(tweet_gap.gap_178 * user_surfacing_tweet_vectors.gap_178) +
(tweet_gap.gap_179 * user_surfacing_tweet_vectors.gap_179) +
(tweet_gap.gap_180 * user_surfacing_tweet_vectors.gap_180) +
(tweet_gap.gap_181 * user_surfacing_tweet_vectors.gap_181) +
(tweet_gap.gap_182 * user_surfacing_tweet_vectors.gap_182) +
(tweet_gap.gap_183 * user_surfacing_tweet_vectors.gap_183) +
(tweet_gap.gap_184 * user_surfacing_tweet_vectors.gap_184) +
(tweet_gap.gap_185 * user_surfacing_tweet_vectors.gap_185) +
(tweet_gap.gap_186 * user_surfacing_tweet_vectors.gap_186) +
(tweet_gap.gap_187 * user_surfacing_tweet_vectors.gap_187) +
(tweet_gap.gap_188 * user_surfacing_tweet_vectors.gap_188) +
(tweet_gap.gap_189 * user_surfacing_tweet_vectors.gap_189) +
(tweet_gap.gap_190 * user_surfacing_tweet_vectors.gap_190) +
(tweet_gap.gap_191 * user_surfacing_tweet_vectors.gap_191) +
(tweet_gap.gap_192 * user_surfacing_tweet_vectors.gap_192) +
(tweet_gap.gap_193 * user_surfacing_tweet_vectors.gap_193) +
(tweet_gap.gap_194 * user_surfacing_tweet_vectors.gap_194) +
(tweet_gap.gap_195 * user_surfacing_tweet_vectors.gap_195) +
(tweet_gap.gap_196 * user_surfacing_tweet_vectors.gap_196) +
(tweet_gap.gap_197 * user_surfacing_tweet_vectors.gap_197) +
(tweet_gap.gap_198 * user_surfacing_tweet_vectors.gap_198) +
(tweet_gap.gap_199 * user_surfacing_tweet_vectors.gap_199) +
(tweet_gap.gap_200 * user_surfacing_tweet_vectors.gap_200) +
(tweet_gap.gap_201 * user_surfacing_tweet_vectors.gap_201) +
(tweet_gap.gap_202 * user_surfacing_tweet_vectors.gap_202) +
(tweet_gap.gap_203 * user_surfacing_tweet_vectors.gap_203) +
(tweet_gap.gap_204 * user_surfacing_tweet_vectors.gap_204) +
(tweet_gap.gap_205 * user_surfacing_tweet_vectors.gap_205) +
(tweet_gap.gap_206 * user_surfacing_tweet_vectors.gap_206) +
(tweet_gap.gap_207 * user_surfacing_tweet_vectors.gap_207) +
(tweet_gap.gap_208 * user_surfacing_tweet_vectors.gap_208) +
(tweet_gap.gap_209 * user_surfacing_tweet_vectors.gap_209) +
(tweet_gap.gap_210 * user_surfacing_tweet_vectors.gap_210) +
(tweet_gap.gap_211 * user_surfacing_tweet_vectors.gap_211) +
(tweet_gap.gap_212 * user_surfacing_tweet_vectors.gap_212) +
(tweet_gap.gap_213 * user_surfacing_tweet_vectors.gap_213) +
(tweet_gap.gap_214 * user_surfacing_tweet_vectors.gap_214) +
(tweet_gap.gap_215 * user_surfacing_tweet_vectors.gap_215) +
(tweet_gap.gap_216 * user_surfacing_tweet_vectors.gap_216) +
(tweet_gap.gap_217 * user_surfacing_tweet_vectors.gap_217) +
(tweet_gap.gap_218 * user_surfacing_tweet_vectors.gap_218) +
(tweet_gap.gap_219 * user_surfacing_tweet_vectors.gap_219) +
(tweet_gap.gap_220 * user_surfacing_tweet_vectors.gap_220) +
(tweet_gap.gap_221 * user_surfacing_tweet_vectors.gap_221) +
(tweet_gap.gap_222 * user_surfacing_tweet_vectors.gap_222) +
(tweet_gap.gap_223 * user_surfacing_tweet_vectors.gap_223) +
(tweet_gap.gap_224 * user_surfacing_tweet_vectors.gap_224) +
(tweet_gap.gap_225 * user_surfacing_tweet_vectors.gap_225) +
(tweet_gap.gap_226 * user_surfacing_tweet_vectors.gap_226) +
(tweet_gap.gap_227 * user_surfacing_tweet_vectors.gap_227) +
(tweet_gap.gap_228 * user_surfacing_tweet_vectors.gap_228) +
(tweet_gap.gap_229 * user_surfacing_tweet_vectors.gap_229) +
(tweet_gap.gap_230 * user_surfacing_tweet_vectors.gap_230) +
(tweet_gap.gap_231 * user_surfacing_tweet_vectors.gap_231) +
(tweet_gap.gap_232 * user_surfacing_tweet_vectors.gap_232) +
(tweet_gap.gap_233 * user_surfacing_tweet_vectors.gap_233) +
(tweet_gap.gap_234 * user_surfacing_tweet_vectors.gap_234) +
(tweet_gap.gap_235 * user_surfacing_tweet_vectors.gap_235) +
(tweet_gap.gap_236 * user_surfacing_tweet_vectors.gap_236) +
(tweet_gap.gap_237 * user_surfacing_tweet_vectors.gap_237) +
(tweet_gap.gap_238 * user_surfacing_tweet_vectors.gap_238) +
(tweet_gap.gap_239 * user_surfacing_tweet_vectors.gap_239) +
(tweet_gap.gap_240 * user_surfacing_tweet_vectors.gap_240) +
(tweet_gap.gap_241 * user_surfacing_tweet_vectors.gap_241) +
(tweet_gap.gap_242 * user_surfacing_tweet_vectors.gap_242) +
(tweet_gap.gap_243 * user_surfacing_tweet_vectors.gap_243) +
(tweet_gap.gap_244 * user_surfacing_tweet_vectors.gap_244) +
(tweet_gap.gap_245 * user_surfacing_tweet_vectors.gap_245) +
(tweet_gap.gap_246 * user_surfacing_tweet_vectors.gap_246) +
(tweet_gap.gap_247 * user_surfacing_tweet_vectors.gap_247) +
(tweet_gap.gap_248 * user_surfacing_tweet_vectors.gap_248) +
(tweet_gap.gap_249 * user_surfacing_tweet_vectors.gap_249) +
(tweet_gap.gap_250 * user_surfacing_tweet_vectors.gap_250) +
(tweet_gap.gap_251 * user_surfacing_tweet_vectors.gap_251) +
(tweet_gap.gap_252 * user_surfacing_tweet_vectors.gap_252) +
(tweet_gap.gap_253 * user_surfacing_tweet_vectors.gap_253) +
(tweet_gap.gap_254 * user_surfacing_tweet_vectors.gap_254) +
(tweet_gap.gap_255 * user_surfacing_tweet_vectors.gap_255) +
(tweet_gap.gap_256 * user_surfacing_tweet_vectors.gap_256) +
(tweet_gap.gap_257 * user_surfacing_tweet_vectors.gap_257) +
(tweet_gap.gap_258 * user_surfacing_tweet_vectors.gap_258) +
(tweet_gap.gap_259 * user_surfacing_tweet_vectors.gap_259) +
(tweet_gap.gap_260 * user_surfacing_tweet_vectors.gap_260) +
(tweet_gap.gap_261 * user_surfacing_tweet_vectors.gap_261) +
(tweet_gap.gap_262 * user_surfacing_tweet_vectors.gap_262) +
(tweet_gap.gap_263 * user_surfacing_tweet_vectors.gap_263) +
(tweet_gap.gap_264 * user_surfacing_tweet_vectors.gap_264) +
(tweet_gap.gap_265 * user_surfacing_tweet_vectors.gap_265) +
(tweet_gap.gap_266 * user_surfacing_tweet_vectors.gap_266) +
(tweet_gap.gap_267 * user_surfacing_tweet_vectors.gap_267) +
(tweet_gap.gap_268 * user_surfacing_tweet_vectors.gap_268) +
(tweet_gap.gap_269 * user_surfacing_tweet_vectors.gap_269) +
(tweet_gap.gap_270 * user_surfacing_tweet_vectors.gap_270) +
(tweet_gap.gap_271 * user_surfacing_tweet_vectors.gap_271) +
(tweet_gap.gap_272 * user_surfacing_tweet_vectors.gap_272) +
(tweet_gap.gap_273 * user_surfacing_tweet_vectors.gap_273) +
(tweet_gap.gap_274 * user_surfacing_tweet_vectors.gap_274) +
(tweet_gap.gap_275 * user_surfacing_tweet_vectors.gap_275) +
(tweet_gap.gap_276 * user_surfacing_tweet_vectors.gap_276) +
(tweet_gap.gap_277 * user_surfacing_tweet_vectors.gap_277) +
(tweet_gap.gap_278 * user_surfacing_tweet_vectors.gap_278) +
(tweet_gap.gap_279 * user_surfacing_tweet_vectors.gap_279) +
(tweet_gap.gap_280 * user_surfacing_tweet_vectors.gap_280) +
(tweet_gap.gap_281 * user_surfacing_tweet_vectors.gap_281) +
(tweet_gap.gap_282 * user_surfacing_tweet_vectors.gap_282) +
(tweet_gap.gap_283 * user_surfacing_tweet_vectors.gap_283) +
(tweet_gap.gap_284 * user_surfacing_tweet_vectors.gap_284) +
(tweet_gap.gap_285 * user_surfacing_tweet_vectors.gap_285) +
(tweet_gap.gap_286 * user_surfacing_tweet_vectors.gap_286) +
(tweet_gap.gap_287 * user_surfacing_tweet_vectors.gap_287) +
(tweet_gap.gap_288 * user_surfacing_tweet_vectors.gap_288) +
(tweet_gap.gap_289 * user_surfacing_tweet_vectors.gap_289) +
(tweet_gap.gap_290 * user_surfacing_tweet_vectors.gap_290) +
(tweet_gap.gap_291 * user_surfacing_tweet_vectors.gap_291) +
(tweet_gap.gap_292 * user_surfacing_tweet_vectors.gap_292) +
(tweet_gap.gap_293 * user_surfacing_tweet_vectors.gap_293) +
(tweet_gap.gap_294 * user_surfacing_tweet_vectors.gap_294) +
(tweet_gap.gap_295 * user_surfacing_tweet_vectors.gap_295) +
(tweet_gap.gap_296 * user_surfacing_tweet_vectors.gap_296) +
(tweet_gap.gap_297 * user_surfacing_tweet_vectors.gap_297) +
(tweet_gap.gap_298 * user_surfacing_tweet_vectors.gap_298) +
(tweet_gap.gap_299 * user_surfacing_tweet_vectors.gap_299) +
(tweet_gap.gap_300 * user_surfacing_tweet_vectors.gap_300) +
(tweet_gap.gap_301 * user_surfacing_tweet_vectors.gap_301) +
(tweet_gap.gap_302 * user_surfacing_tweet_vectors.gap_302) +
(tweet_gap.gap_303 * user_surfacing_tweet_vectors.gap_303) +
(tweet_gap.gap_304 * user_surfacing_tweet_vectors.gap_304) +
(tweet_gap.gap_305 * user_surfacing_tweet_vectors.gap_305) +
(tweet_gap.gap_306 * user_surfacing_tweet_vectors.gap_306) +
(tweet_gap.gap_307 * user_surfacing_tweet_vectors.gap_307) +
(tweet_gap.gap_308 * user_surfacing_tweet_vectors.gap_308) +
(tweet_gap.gap_309 * user_surfacing_tweet_vectors.gap_309) +
(tweet_gap.gap_310 * user_surfacing_tweet_vectors.gap_310) +
(tweet_gap.gap_311 * user_surfacing_tweet_vectors.gap_311) +
(tweet_gap.gap_312 * user_surfacing_tweet_vectors.gap_312) +
(tweet_gap.gap_313 * user_surfacing_tweet_vectors.gap_313) +
(tweet_gap.gap_314 * user_surfacing_tweet_vectors.gap_314) +
(tweet_gap.gap_315 * user_surfacing_tweet_vectors.gap_315) +
(tweet_gap.gap_316 * user_surfacing_tweet_vectors.gap_316) +
(tweet_gap.gap_317 * user_surfacing_tweet_vectors.gap_317) +
(tweet_gap.gap_318 * user_surfacing_tweet_vectors.gap_318) +
(tweet_gap.gap_319 * user_surfacing_tweet_vectors.gap_319) +
(tweet_gap.gap_320 * user_surfacing_tweet_vectors.gap_320) +
(tweet_gap.gap_321 * user_surfacing_tweet_vectors.gap_321) +
(tweet_gap.gap_322 * user_surfacing_tweet_vectors.gap_322) +
(tweet_gap.gap_323 * user_surfacing_tweet_vectors.gap_323) +
(tweet_gap.gap_324 * user_surfacing_tweet_vectors.gap_324) +
(tweet_gap.gap_325 * user_surfacing_tweet_vectors.gap_325) +
(tweet_gap.gap_326 * user_surfacing_tweet_vectors.gap_326) +
(tweet_gap.gap_327 * user_surfacing_tweet_vectors.gap_327) +
(tweet_gap.gap_328 * user_surfacing_tweet_vectors.gap_328) +
(tweet_gap.gap_329 * user_surfacing_tweet_vectors.gap_329) +
(tweet_gap.gap_330 * user_surfacing_tweet_vectors.gap_330) +
(tweet_gap.gap_331 * user_surfacing_tweet_vectors.gap_331) +
(tweet_gap.gap_332 * user_surfacing_tweet_vectors.gap_332) +
(tweet_gap.gap_333 * user_surfacing_tweet_vectors.gap_333) +
(tweet_gap.gap_334 * user_surfacing_tweet_vectors.gap_334) +
(tweet_gap.gap_335 * user_surfacing_tweet_vectors.gap_335) +
(tweet_gap.gap_336 * user_surfacing_tweet_vectors.gap_336) +
(tweet_gap.gap_337 * user_surfacing_tweet_vectors.gap_337) +
(tweet_gap.gap_338 * user_surfacing_tweet_vectors.gap_338) +
(tweet_gap.gap_339 * user_surfacing_tweet_vectors.gap_339) +
(tweet_gap.gap_340 * user_surfacing_tweet_vectors.gap_340) +
(tweet_gap.gap_341 * user_surfacing_tweet_vectors.gap_341) | |
if 'lnn' in params:
path_params['Lnn'] = params['lnn'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/cluster/nodes/{Lnn}/hardware-fast', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NodeHardwareFast', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_node_internal_ip_address(self, lnn, **kwargs): # noqa: E501
"""get_node_internal_ip_address # noqa: E501
View internal ip address with respect to node. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node_internal_ip_address(lnn, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int lnn: (required)
:return: NodeInternalIpAddress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_node_internal_ip_address_with_http_info(lnn, **kwargs) # noqa: E501
else:
(data) = self.get_node_internal_ip_address_with_http_info(lnn, **kwargs) # noqa: E501
return data
    def get_node_internal_ip_address_with_http_info(self, lnn, **kwargs): # noqa: E501
        """get_node_internal_ip_address # noqa: E501
        View internal ip address with respect to node. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_node_internal_ip_address_with_http_info(lnn, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int lnn: (required)
        :return: NodeInternalIpAddress
        If the method is called asynchronously,
        returns the request thread.
        """
        # Keyword arguments accepted by this generated method; anything else
        # is rejected below with a TypeError, mimicking a normal signature.
        all_params = ['lnn'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots self/lnn/kwargs/all_params here; validated
        # kwargs are merged into this dict and every later lookup goes through
        # `params`, so local variable names are load-bearing in this method.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_node_internal_ip_address" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'lnn' is set
        if ('lnn' not in params or
                params['lnn'] is None):
            raise ValueError("Missing the required parameter `lnn` when calling `get_node_internal_ip_address`") # noqa: E501
        collection_formats = {}
        # `lnn` fills the {Lnn} placeholder in the URL path template below.
        path_params = {}
        if 'lnn' in params:
            path_params['Lnn'] = params['lnn'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        # The ApiClient performs the request; async/raw-response behaviour is
        # controlled entirely by the forwarded keyword arguments.
        return self.api_client.call_api(
            '/platform/7/cluster/nodes/{Lnn}/internal-ip-address', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='NodeInternalIpAddress', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_node_partitions(self, lnn, **kwargs): # noqa: E501
"""get_node_partitions # noqa: E501
Retrieve node partition information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node_partitions(lnn, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int lnn: (required)
:return: NodePartitions
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_node_partitions_with_http_info(lnn, **kwargs) # noqa: E501
else:
(data) = self.get_node_partitions_with_http_info(lnn, **kwargs) # noqa: E501
return data
    def get_node_partitions_with_http_info(self, lnn, **kwargs): # noqa: E501
        """get_node_partitions # noqa: E501
        Retrieve node partition information. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_node_partitions_with_http_info(lnn, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int lnn: (required)
        :return: NodePartitions
        If the method is called asynchronously,
        returns the request thread.
        """
        # Keyword arguments accepted by this generated method; anything else
        # is rejected below with a TypeError, mimicking a normal signature.
        all_params = ['lnn'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots self/lnn/kwargs/all_params here; validated
        # kwargs are merged into this dict and every later lookup goes through
        # `params`, so local variable names are load-bearing in this method.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_node_partitions" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'lnn' is set
        if ('lnn' not in params or
                params['lnn'] is None):
            raise ValueError("Missing the required parameter `lnn` when calling `get_node_partitions`") # noqa: E501
        collection_formats = {}
        # `lnn` fills the {Lnn} placeholder in the URL path template below.
        path_params = {}
        if 'lnn' in params:
            path_params['Lnn'] = params['lnn'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        # The ApiClient performs the request; async/raw-response behaviour is
        # controlled entirely by the forwarded keyword arguments.
        return self.api_client.call_api(
            '/platform/3/cluster/nodes/{Lnn}/partitions', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='NodePartitions', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_node_sensors(self, lnn, **kwargs): # noqa: E501
"""get_node_sensors # noqa: E501
Retrieve node sensor information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node_sensors(lnn, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int lnn: (required)
:return: NodeSensors
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_node_sensors_with_http_info(lnn, **kwargs) # noqa: E501
else:
(data) = self.get_node_sensors_with_http_info(lnn, **kwargs) # noqa: E501
return data
    def get_node_sensors_with_http_info(self, lnn, **kwargs): # noqa: E501
        """get_node_sensors # noqa: E501
        Retrieve node sensor information. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_node_sensors_with_http_info(lnn, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int lnn: (required)
        :return: NodeSensors
        If the method is called asynchronously,
        returns the request thread.
        """
        # Keyword arguments accepted by this generated method; anything else
        # is rejected below with a TypeError, mimicking a normal signature.
        all_params = ['lnn'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots self/lnn/kwargs/all_params here; validated
        # kwargs are merged into this dict and every later lookup goes through
        # `params`, so local variable names are load-bearing in this method.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_node_sensors" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'lnn' is set
        if ('lnn' not in params or
                params['lnn'] is None):
            raise ValueError("Missing the required parameter `lnn` when calling `get_node_sensors`") # noqa: E501
        collection_formats = {}
        # `lnn` fills the {Lnn} placeholder in the URL path template below.
        path_params = {}
        if 'lnn' in params:
            path_params['Lnn'] = params['lnn'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth'] # noqa: E501
        # The ApiClient performs the request; async/raw-response behaviour is
        # controlled entirely by the forwarded keyword arguments.
        return self.api_client.call_api(
            '/platform/3/cluster/nodes/{Lnn}/sensors', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='NodeSensors', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_node_sled(self, node_sled_id, lnn, **kwargs): # noqa: E501
"""get_node_sled # noqa: E501
Get detailed information for the sled specified by <SLEDID>, or all sleds in the case where <SLEDID> is 'all', in the node specified by <LNN>. Accepts <sledid> in either 'sled' or 'all' formats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node_sled(node_sled_id, lnn, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str node_sled_id: Get detailed information for the sled specified by <SLEDID>, or all sleds in the case where <SLEDID> is 'all', in the node specified by <LNN>. Accepts <sledid> in either 'sled' or 'all' formats. (required)
:param int lnn: (required)
:param float timeout: Request timeout
:return: NodeSleds
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_node_sled_with_http_info(node_sled_id, lnn, **kwargs) # noqa: E501
else:
(data) = self.get_node_sled_with_http_info(node_sled_id, lnn, **kwargs) # noqa: E501
return data
def get_node_sled_with_http_info(self, node_sled_id, lnn, **kwargs): # noqa: E501
"""get_node_sled # noqa: E501
Get detailed information for the sled specified by <SLEDID>, or all sleds in the case where <SLEDID> is 'all', in the node specified by <LNN>. Accepts <sledid> in either 'sled' or 'all' formats. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_node_sled_with_http_info(node_sled_id, lnn, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str node_sled_id: Get detailed information for the sled specified by <SLEDID>, or all sleds in the case where <SLEDID> is 'all', in the node | |
#!/usr/bin/env python3
import glob
import os
import tempfile
from collections import OrderedDict, defaultdict
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Set,
Tuple,
Union,
cast,
)
import torch
from captum._utils.av import AV
from captum._utils.common import _get_module_from_name
from captum.concept._core.concept import Concept
from captum.concept._core.tcav import TCAV
from captum.concept._utils.classifier import Classifier
from captum.concept._utils.common import concepts_to_str
from captum.concept._utils.data_iterator import dataset_to_dataloader
from tests.helpers.basic import BaseTest, assertTensorAlmostEqual
from tests.helpers.basic_models import BasicModel_ConvNet
from torch import Tensor
from torch.utils.data import DataLoader, IterableDataset
class CustomClassifier(Classifier):
    r"""
    Wraps a custom linear Classifier that is necessary for the
    implementation of Concept Activation Vectors (TCAVs), as described
    in the paper:
        https://arxiv.org/pdf/1711.11279.pdf
    This class simulates the output of a Linear Classifier such as
    sklearn without actually using it.
    """

    def __init__(self) -> None:
        Classifier.__init__(self)

    def train_and_eval(
        self, dataloader: DataLoader, **kwargs: Any
    ) -> Union[Dict, None]:
        """Simulate a training run: record classes, fabricate predictions,
        and return the resulting mock accuracy as ``{"accs": tensor}``."""
        # Materialize the dataloader into flat input/label tensors.
        inputs = []
        labels = []
        for input, label in dataloader:
            inputs.append(input)
            labels.append(label)
        inputs = torch.cat(inputs)
        labels = torch.cat(labels)
        # update concept ids aka classes (order-preserving de-duplication)
        self._classes = list(OrderedDict.fromkeys([label.item() for label in labels]))
        # Training is skipped for performance and independence-of-sklearn reasons
        _, x_test, _, y_test = train_test_split(inputs, labels)
        # A tensor with dimensions n_inputs x (1 - test_split) x n_concepts
        # should be returned here.
        # Assemble a list with size inputs.shape[0], divided in 4 quarters
        # [0, 0, 0, ... | 1, 1, 1, ... | 0, 0, 0, ... | 1, 1, 1, ... ]
        pred = [0] * x_test.shape[0]
        # Store the shape of 1/4 of inputs.shape[0] (sh_4) and use it
        sh_4 = x_test.shape[0] / 4
        # Fill only the 2nd and 4th quarters (i = 1 and i = 3) with ones.
        for i in range(1, 4, 2):
            from_ = round(i * sh_4)
            to_ = round((i + 1) * sh_4)
            pred[from_:to_] = [1] * (round((i + 1) * sh_4) - round(i * sh_4))
        y_pred = torch.tensor(pred)
        # Element-wise comparison; the mean of the boolean hits is the accuracy.
        score = y_pred == y_test
        accs = score.float().mean()
        # A hack to mock weights for two different layers.
        # NOTE(review): `input` is the loop variable leaked from the batch
        # loop above (last batch) — this relies on that leak deliberately.
        self.num_features = input.shape[1]
        return {"accs": accs}

    def weights(self) -> Tensor:
        # Random weights for any layer width other than 16; the fixed
        # 2 x 16 matrix below keeps tests on that layer deterministic.
        if self.num_features != 16:
            return torch.randn(2, self.num_features)
        return torch.tensor(
            [
                # The two rows differ only in the 14th weight (0.1616 vs 0.2616).
                [-0.2167, -0.0809, -0.1235, -0.2450, 0.2954, 0.5409, -0.2587,
                 -0.3428, 0.2486, -0.0123, 0.2737, 0.4876, -0.1133, 0.1616,
                 -0.2016, -0.0413],
                [-0.2167, -0.0809, -0.1235, -0.2450, 0.2954, 0.5409, -0.2587,
                 -0.3428, 0.2486, -0.0123, 0.2737, 0.4876, -0.1133, 0.2616,
                 -0.2016, -0.0413],
            ],
            dtype=torch.float64,
        )

    def classes(self) -> List[int]:
        # Classes discovered during the most recent train_and_eval call.
        return self._classes
class CustomClassifier_WO_Returning_Metrics(CustomClassifier):
    """CustomClassifier variant whose train_and_eval reports no metrics."""

    def __init__(self) -> None:
        super().__init__()

    def train_and_eval(
        self, dataloader: DataLoader, **kwargs: Any
    ) -> Union[Dict, None]:
        # Run the parent's training for its side effects, then drop the stats.
        super().train_and_eval(dataloader)
        return None
class CustomClassifier_W_Flipped_Class_Id(CustomClassifier):
    """CustomClassifier variant that reports weights and class ids in swapped order."""

    def __init__(self) -> None:
        super().__init__()

    def weights(self) -> Tensor:
        # The parent returns a fresh 2-row tensor on every call, so reversing
        # the row order produces the same values as the in-place swap did.
        return CustomClassifier.weights(self).flip(0)

    def classes(self) -> List[int]:
        # The parent hands back its internal list, so swap in place to keep
        # the original side effect on self._classes (it toggles per call).
        swapped = CustomClassifier.classes(self)
        first = swapped[0]
        swapped[0] = swapped[1]
        swapped[1] = first
        return swapped
class CustomIterableDataset(IterableDataset):
    r"""
    Auxiliary class for iterating through an image dataset.

    Yields one tensor per placeholder file name by delegating to the
    loader callable supplied at construction time.
    """

    def __init__(
        self, get_tensor_from_filename_func: Callable, path: str, num_samples=100
    ) -> None:
        r"""
        Args:
            get_tensor_from_filename_func (Callable): Maps a file name to a tensor.
            path (str): Path to dataset files
            num_samples (int): How many placeholder samples to yield.
        """
        self.path = path
        # Placeholder "file names"; the loader used in tests ignores them.
        self.file_itr = ["x"] * num_samples
        self.get_tensor_from_filename_func = get_tensor_from_filename_func

    def get_tensor_from_filename(self, filename: str) -> Tensor:
        # Delegate to the injected loader callable.
        return self.get_tensor_from_filename_func(filename)

    def __iter__(self) -> Iterator:
        for name in self.file_itr:
            yield self.get_tensor_from_filename(name)
def train_test_split(
    x_list: Tensor, y_list: Union[Tensor, List[int]], test_split: float = 0.33
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """
    Deterministically split paired samples into test/train partitions and
    overwrite the labels so each partition is half zeros then half ones.

    The first ``int(test_split * len)`` pairs become the test set; the rest
    become the train set. No shuffling is performed.
    """
    pairs = list(zip(x_list, y_list))
    n_test = int(test_split * len(pairs))
    test_pairs, train_pairs = pairs[:n_test], pairs[n_test:]
    x_test, y_test = zip(*test_pairs)
    x_train, y_train = zip(*train_pairs)
    x_train = torch.stack(x_train)
    x_test = torch.stack(x_test)
    y_train = torch.stack(y_train)
    y_test = torch.stack(y_test)
    # Synthetic labels: first half 0s, second half 1s, in each partition.
    half_train = len(y_train) // 2
    y_train[:half_train] = 0
    y_train[half_train:] = 1
    half_test = len(y_test) // 2
    y_test[:half_test] = 0
    y_test[half_test:] = 1
    return x_train, x_test, y_train, y_test
def get_tensor_from_filename(filename: str) -> Tensor:
    """
    Return a fixed 1x10x10 float tensor regardless of *filename*.

    The values are a hard-coded pseudo-random grid scaled by 100 so that
    tests are fully deterministic; the file name argument is ignored.
    """
    grid = [
        [0.4963, 0.7682, 0.0885, 0.1320, 0.3074, 0.6341, 0.4901, 0.8964, 0.4556, 0.6323],
        [0.3489, 0.4017, 0.0223, 0.1689, 0.2939, 0.5185, 0.6977, 0.8000, 0.1610, 0.2823],
        [0.6816, 0.9152, 0.3971, 0.8742, 0.4194, 0.5529, 0.9527, 0.0362, 0.1852, 0.3734],
        [0.3051, 0.9320, 0.1759, 0.2698, 0.1507, 0.0317, 0.2081, 0.9298, 0.7231, 0.7423],
        [0.5263, 0.2437, 0.5846, 0.0332, 0.1387, 0.2422, 0.8155, 0.7932, 0.2783, 0.4820],
        [0.8198, 0.9971, 0.6984, 0.5675, 0.8352, 0.2056, 0.5932, 0.1123, 0.1535, 0.2417],
        [0.7262, 0.7011, 0.2038, 0.6511, 0.7745, 0.4369, 0.5191, 0.6159, 0.8102, 0.9801],
        [0.1147, 0.3168, 0.6965, 0.9143, 0.9351, 0.9412, 0.5995, 0.0652, 0.5460, 0.1872],
        [0.0340, 0.9442, 0.8802, 0.0012, 0.5936, 0.4158, 0.4177, 0.2711, 0.6923, 0.2038],
        [0.6833, 0.7529, 0.8579, 0.6870, 0.0051, 0.1757, 0.7497, 0.6047, 0.1100, 0.2121],
    ]
    # Wrap in an outer list for the leading channel dimension, then scale.
    return torch.tensor([grid]) * 100
def get_inputs_tensor() -> Tensor:
    """
    Return a fixed (2, 1, 10, 10) float tensor with requires_grad=True.

    Two single-channel 10x10 "images" of hard-coded values, used as a
    deterministic stand-in for a real input batch.
    """
    rows = [
        # image 0
        [-1.1258e00, -1.1524e00, -2.5058e-01, -4.3388e-01, 8.4871e-01, 6.9201e-01, -3.1601e-01, -2.1152e00, 3.2227e-01, -1.2633e00],
        [3.4998e-01, 3.0813e-01, 1.1984e-01, 1.2377e00, 1.1168e00, -2.4728e-01, -1.3527e00, -1.6959e00, 5.6665e-01, 7.9351e-01],
        [5.9884e-01, -1.5551e00, -3.4136e-01, 1.8530e00, 7.5019e-01, -5.8550e-01, -1.7340e-01, 1.8348e-01, 1.3894e00, 1.5863e00],
        [9.4630e-01, -8.4368e-01, -6.1358e-01, 3.1593e-02, -4.9268e-01, 2.4841e-01, 4.3970e-01, 1.1241e-01, 6.4079e-01, 4.4116e-01],
        [-1.0231e-01, 7.9244e-01, -2.8967e-01, 5.2507e-02, 5.2286e-01, 2.3022e00, -1.4689e00, -1.5867e00, -6.7309e-01, 8.7283e-01],
        [1.0554e00, 1.7784e-01, -2.3034e-01, -3.9175e-01, 5.4329e-01, -3.9516e-01, -4.4622e-01, 7.4402e-01, 1.5210e00, 3.4105e00],
        [-1.5312e00, -1.2341e00, 1.8197e00, -5.5153e-01, -5.6925e-01, 9.1997e-01, 1.1108e00, 1.2899e00, -1.4782e00, 2.5672e00],
        [-4.7312e-01, 3.3555e-01, -1.6293e00, -5.4974e-01, -4.7983e-01, -4.9968e-01, -1.0670e00, 1.1149e00, -1.4067e-01, 8.0575e-01],
        [-9.3348e-02, 6.8705e-01, -8.3832e-01, 8.9182e-04, 8.4189e-01, -4.0003e-01, 1.0395e00, 3.5815e-01, -2.4600e-01, 2.3025e00],
        [-1.8817e00, -4.9727e-02, -1.0450e00, -9.5650e-01, 3.3532e-02, 7.1009e-01, 1.6459e00, -1.3602e00, 3.4457e-01, 5.1987e-01],
        # image 1
        [-2.6133e00, -1.6965e00, -2.2824e-01, 2.7995e-01, 2.4693e-01, 7.6887e-02, 3.3801e-01, 4.5440e-01, 4.5694e-01, -8.6537e-01],
        [7.8131e-01, -9.2679e-01, -2.1883e-01, -2.4351e00, -7.2915e-02, -3.3986e-02, 9.6252e-01, 3.4917e-01, -9.2146e-01, -5.6195e-02],
        [-6.2270e-01, -4.6372e-01, 1.9218e00, -4.0255e-01, 1.2390e-01, 1.1648e00, 9.2337e-01, 1.3873e00, -8.8338e-01, -4.1891e-01],
        [-8.0483e-01, 5.6561e-01, 6.1036e-01, 4.6688e-01, 1.9507e00, -1.0631e00, -7.7326e-02, 1.1640e-01, -5.9399e-01, -1.2439e00],
        [-1.0209e-01, -1.0335e00, -3.1264e-01, 2.4579e-01, -2.5964e-01, 1.1834e-01, 2.4396e-01, 1.1646e00, 2.8858e-01, 3.8660e-01],
        [-2.0106e-01, -1.1793e-01, 1.9220e-01, -7.7216e-01, -1.9003e00, 1.3068e-01, -7.0429e-01, 3.1472e-01, 1.5739e-01, 3.8536e-01],
        [9.6715e-01, -9.9108e-01, 3.0161e-01, -1.0732e-01, 9.9846e-01, -4.9871e-01, 7.6111e-01, 6.1830e-01, 3.1405e-01, 2.1333e-01],
        [-1.2005e-01, 3.6046e-01, -3.1403e-01, -1.0787e00, 2.4081e-01, -1.3962e00, -6.6144e-02, -3.5836e-01, -1.5616e00, -3.5464e-01],
        [1.0811e00, 1.3148e-01, 1.5735e00, 7.8143e-01, -5.1107e-01, -1.7137e00, -5.1006e-01, -4.7489e-01, -6.3340e-01, -1.4677e00],
        [-8.7848e-01, -2.0784e00, -1.1005e00, -7.2013e-01, 1.1931e-02, 3.3977e-01, -2.6345e-01, 1.2805e00, 1.9395e-02, -8.8080e-01],
    ]
    # Build the leaf first, reshape, then flag gradients. The reshape of a
    # no-grad tensor is still a leaf, so requires_grad_() leaves us with the
    # same leaf-with-grad semantics as torch.tensor(..., requires_grad=True).
    return torch.tensor(rows).reshape(2, 1, 10, 10).requires_grad_()
def create_concept(concept_name: str, concept_id: int) -> Concept:
    """Build a Concept backed by a dummy iterable dataset for *concept_name*."""
    dataset = CustomIterableDataset(
        get_tensor_from_filename, "./dummy/concepts/" + concept_name + "/"
    )
    return Concept(
        id=concept_id, name=concept_name, data_iter=dataset_to_dataloader(dataset)
    )
def create_concepts() -> Tuple[List[Concept], Dict[str, Concept]]:
    """
    Create Concept objects from a pre-set concept name list.

    Returns:
        (concept_list, concept_dict): the concepts in declaration order,
        plus a name -> Concept lookup over the same objects.
    """
    concept_names = ["striped", "ceo", "random", "dotted"]
    concept_list = []
    # A plain dict: the previous defaultdict() had no default_factory, so it
    # behaved exactly like dict while obscuring the intent.
    concept_dict: Dict[str, Concept] = {}
    for concept_id, concept_name in enumerate(concept_names):
        concept = create_concept(concept_name, concept_id)
        concept_list.append(concept)
        concept_dict[concept_name] = concept
    return concept_list, concept_dict
def find_concept_by_id(concepts: Set[Concept], id: int) -> Union[Concept, None]:
    """Return the concept whose ``id`` matches, or None when absent."""
    return next((concept for concept in concepts if concept.id == id), None)
def create_TCAV(save_path: str, classifier: Classifier, layers) -> TCAV:
    """Construct a TCAV instance over a fresh BasicModel_ConvNet."""
    return TCAV(
        BasicModel_ConvNet(),
        layers,
        classifier,
        save_path=save_path,
    )
def init_TCAV(
    save_path: str, classifier: Classifier, layers: Union[str, List[str]]
) -> Tuple[TCAV, Dict[str, Concept]]:
    """Create the default concepts and a TCAV instance; return (tcav, concept_dict)."""
    _, concepts_dict = create_concepts()
    return create_TCAV(save_path, classifier, layers), concepts_dict
def remove_pkls(path: str) -> None:
    """Delete every ``*.pkl`` file directly inside *path*."""
    for pkl_file in glob.glob(os.path.join(path, "*.pkl")):
        os.remove(pkl_file)
class Test(BaseTest):
r"""
Class for testing the TCAV class through a sequence of operations:
- Create the Concepts (random tensor generation simulation)
- Create the TCAV class
- Generate Activations
- Compute the CAVs
- Interpret (the images - simulated with random tensors)
"""
    def test_compute_cav_repeating_concept_ids(self) -> None:
        """Duplicate concept ids across experimental sets must be rejected."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
            # Concept id 0 appears in both sets ("striped" and "striped2").
            experimental_sets = [
                [create_concept("striped", 0), create_concept("random", 1)],
                [create_concept("ceo", 2), create_concept("striped2", 0)],
            ]
            # compute_cavs asserts id uniqueness before doing any work.
            with self.assertRaises(AssertionError):
                tcav.compute_cavs(experimental_sets)
    def test_compute_cav_repeating_concept_names(self) -> None:
        """Repeated concept *names* (with distinct ids) are allowed and both
        CAVs are computed and keyed by their id pairs."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            tcav = create_TCAV(tmpdirname, CustomClassifier(), "conv1")
            # "striped" is reused, but ids (0 and 3) are unique, so this is valid.
            experimental_sets = [
                [create_concept("striped", 0), create_concept("random", 1)],
                [create_concept("ceo", 2), create_concept("striped", 3)],
            ]
            cavs = tcav.compute_cavs(experimental_sets)
            # CAVs are keyed by the "-"-joined sorted concept ids.
            self.assertTrue("0-1" in cavs.keys())
            self.assertTrue("2-3" in cavs.keys())
            self.assertEqual(cavs["0-1"]["conv1"].layer, "conv1")
            self.assertEqual(cavs["2-3"]["conv1"].layer, "conv1")
            # First experimental set: concept metadata round-trips intact.
            self.assertEqual(cavs["0-1"]["conv1"].concepts[0].id, 0)
            self.assertEqual(cavs["0-1"]["conv1"].concepts[0].name, "striped")
            self.assertEqual(cavs["0-1"]["conv1"].concepts[1].id, 1)
            self.assertEqual(cavs["0-1"]["conv1"].concepts[1].name, "random")
            self.assertEqual(cavs["0-1"]["conv1"].stats["classes"], [0, 1])
            # Accuracy comes from CustomClassifier's deterministic mock predictions.
            self.assertAlmostEqual(
                cavs["0-1"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
            )
            self.assertEqual(
                list(cavs["0-1"]["conv1"].stats["weights"].shape), [2, 128]
            )
            # Second experimental set mirrors the first.
            self.assertEqual(cavs["2-3"]["conv1"].concepts[0].id, 2)
            self.assertEqual(cavs["2-3"]["conv1"].concepts[0].name, "ceo")
            self.assertEqual(cavs["2-3"]["conv1"].concepts[1].id, 3)
            self.assertEqual(cavs["2-3"]["conv1"].concepts[1].name, "striped")
            self.assertEqual(cavs["2-3"]["conv1"].stats["classes"], [2, 3])
            self.assertAlmostEqual(
                cavs["2-3"]["conv1"].stats["accs"].item(), 0.4848, delta=0.001
            )
            self.assertEqual(
                list(cavs["2-3"]["conv1"].stats["weights"].shape), [2, 128]
            )
def compute_cavs_interpret(
self,
experimental_sets: List[List[str]],
force_train: bool,
accs: float,
sign_count: float,
magnitude: float,
processes: int = 1,
remove_activation: bool = False,
layers: Union[str, List[str]] = "conv2",
) -> None:
classifier | |
# reactorcore/services/cache.py
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from redis.exceptions import RedisError
from time import time
import collections
import pickle
import logging
import re
from tornado import gen
from tornado import concurrent
from reactorcore.dao.redis import RedisSource
from reactorcore.services.base import BaseService
logger = logging.getLogger(__name__)
class AbstractCache:
    """Abstract interface every cache backend (redis, memory, void) implements:
    get/set, counters, bounded lists, unique sets, and flushing."""

    # NOTE(review): __metaclass__ only activates ABCMeta under Python 2; this
    # module's other code (xrange, ex.message) is Python 2, so it matches.
    __metaclass__ = ABCMeta

    @abstractmethod
    def set(self, *args, **kwargs):
        pass

    @abstractmethod
    def unique_add(self, *args, **kwargs):
        pass

    @abstractmethod
    def get_unique_set(self, *args, **kwargs):
        pass

    @abstractmethod
    def get(self, *args, **kwargs):
        pass

    @abstractmethod
    def get_int(self, *args, **kwargs):
        pass

    @abstractmethod
    def get_array(self, *args, **kwargs):
        pass

    @abstractmethod
    def get_multi(self, *keys):
        pass

    @gen.coroutine
    def incr(self, key, ticks=1):
        # Base implementation is a logged no-op; concrete backends override it.
        logger.debug('Incrementing "%s" by %s', key, ticks)
        return

    @gen.coroutine
    def decr(self, key, ticks=1):
        # Decrement is defined once, for all backends, as a negated increment.
        logger.debug('Decrementing "%s" by %s', key, ticks)
        yield self.incr(key, ticks * -1)

    @abstractmethod
    def prepend(self, *args, **kwargs):
        pass

    @abstractmethod
    def append(self, *args, **kwargs):
        pass

    @abstractmethod
    def remove(self, *args, **kwargs):
        pass

    @abstractmethod
    def flush(self, *args, **kwargs):
        pass

    @abstractmethod
    def flush_all(self):
        pass
class VoidCache(BaseService, AbstractCache):
    """
    Pass-through cache

    Every operation is a no-op: writes are dropped and reads return the
    type-appropriate "empty" value (None, 0, [], or a dict of Nones), so
    callers can run with caching disabled without special-casing.
    """

    @gen.coroutine
    def set(self, *args, **kwargs):
        return

    @gen.coroutine
    def unique_add(self, *args, **kwargs):
        return

    @gen.coroutine
    def get_unique_set(self, *args, **kwargs):
        pass

    @gen.coroutine
    def get(self, *args, **kwargs):
        return

    @gen.coroutine
    def get_int(self, *args, **kwargs):
        # Counters read as zero when caching is disabled.
        return 0

    @gen.coroutine
    def get_array(self, *args, **kwargs):
        # Lists read as empty when caching is disabled.
        return []

    @gen.coroutine
    def incr(self, key, ticks=1):
        pass

    @gen.coroutine
    def decr(self, key, ticks=1):
        pass

    @gen.coroutine
    def delete(self, *args, **kwargs):
        pass

    @gen.coroutine
    def flush(self, *args, **kwargs):
        pass

    @gen.coroutine
    def flush_all(self):
        pass

    @gen.coroutine
    def get_multi(self, *keys_in):
        # Mirrors RedisCache.get_multi's all-miss shape: every key -> None.
        raise gen.Return(dict.fromkeys(keys_in))
class RedisCache(RedisSource, BaseService, AbstractCache):
    """
    Redis-based cache.

    Values are pickled on write and unpickled on read; plain keys are
    namespaced with ``self.prefix``. RedisError is always caught, logged
    and swallowed so a cache outage never takes the caller down.
    """

    # Delete at most this many keys per DEL command while flushing.
    FLUSH_STEP = 1000

    def __init__(self):
        super(RedisCache, self).__init__(name="CACHE", cls=self.__class__)
        self.prefix = "cache:"

    @concurrent.run_on_executor
    def set(self, key, value, expire=None):
        """Store a pickled value under the prefixed key, with optional TTL."""
        logger.debug('Setting cache key "%s" with TTL %s', key, expire)
        key = self.prefix + key
        pickled_val = pickle.dumps(value)
        try:
            if expire is not None:
                # Add key and define an expire in a pipeline for atomicity
                with self.client.pipeline() as pipe:
                    pipe.set(key, pickled_val)
                    pipe.expire(key, expire)
                    pipe.execute()
            else:
                self.client.set(key, pickled_val)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache SET: %s", ex.message, exc_info=True
            )

    @concurrent.run_on_executor
    def get(self, key):
        """Return the unpickled value for *key*, or None on miss/error."""
        logger.debug('Getting key "%s"', key)
        key = self.prefix + key
        data = None
        value = None
        try:
            data = self.client.get(key)
            # unpickle
            value = pickle.loads(data) if data else None
            logger.debug('Value for "%s": %s', key, value)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache GET: %s", ex.message, exc_info=True
            )
        return value

    @gen.coroutine
    def unique_add(self, set_name, value):
        """Add *value* to the redis set *set_name*.

        NOTE(review): unlike the pickled key/value methods, set names are
        not prefixed with ``self.prefix`` — confirm this is intentional.
        """
        logger.debug('Adding "%s" to set "%s"', value, set_name)
        try:
            self.client.sadd(set_name, value)
        except RedisError as ex:
            # Fixed log label: this is SADD, not GET.
            logger.critical(
                "[EXCEPTION] Error on cache SADD: %s", ex.message, exc_info=True
            )

    @gen.coroutine
    def get_unique_set(self, set_name):
        """Return all members of the redis set *set_name* (empty set on error)."""
        logger.debug('Getting set "%s"', set_name)
        # Initialize so the fallback below works even when smembers raises;
        # previously `members` was unbound in that path (NameError).
        members = None
        try:
            members = self.client.smembers(set_name)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache SMEMBERS: %s", ex.message, exc_info=True
            )
        members = members or set()
        logger.debug('%s items in "%s"', len(members), set_name)
        return members

    @concurrent.run_on_executor
    def get_int(self, key):
        """Return the raw counter value for *key* (0 on miss or error)."""
        logger.debug('Getting key "%s"', key)
        key = self.prefix + key
        value = None
        try:
            value = self.client.get(key)
            logger.debug('Value for "%s": %s', key, value)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache GET: %s", ex.message, exc_info=True
            )
        return value or 0

    @concurrent.run_on_executor
    def get_array(self, key, count=None):
        """Return up to *count* unpickled items from the list at *key*."""
        assert count
        logger.debug('Getting array for key "%s" with %s items', key, count)
        key = self.prefix + key
        arr = []
        try:
            data = self.client.lrange(key, 0, count - 1)
            # unpickle elements
            if data:
                arr = map(lambda x: pickle.loads(x), data)
            logger.debug('Value for "%s": %s', key, arr)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache LRANGE: %s", ex.message, exc_info=True
            )
        return arr

    @concurrent.run_on_executor
    def get_multi(self, *keys_in):
        """Fetch several keys at once; returns {key: value-or-None}."""
        if not keys_in:
            return None
        logger.debug('Getting keys "%s"', keys_in)
        keys = [self.prefix + key for key in keys_in]
        lookup = None
        try:
            data = self.client.mget(keys)
            # unpickle values
            values = [(val and pickle.loads(str(val))) or None for val in data]
            # remove the cache prefix from keys
            keys = [key[len(self.prefix) :] for key in keys]
            # pack into key/val dictionary so it's more usable for the client
            lookup = dict(zip(keys, values))
            logger.debug("Cache data: %s", lookup)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache MGET: %s",
                ex.message,
                exc_info=True,
            )
            # Degrade to an all-miss result rather than failing the caller.
            return dict.fromkeys(keys_in)
        except pickle.UnpicklingError as ex:
            logger.critical(
                "[EXCEPTION] Unpickle error: %s", ex.message, exc_info=True
            )
            return dict.fromkeys(keys_in)
        return lookup

    @concurrent.run_on_executor
    def incr(self, key, ticks=1):
        """Increment the counter at *key* by *ticks* (atomic in redis)."""
        assert key
        logger.debug('Incrementing "%s" by %s', key, ticks)
        key = self.prefix + key
        try:
            self.client.incr(key, ticks)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache INCR: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def prepend(self, key, value, size=1000):
        """Push *value* onto the head of the list, trimming it to *size*."""
        assert key
        assert size > 1
        logger.debug('Prepending "%s" to %s', value, key)
        key = self.prefix + key
        pickled_val = pickle.dumps(value)
        try:
            # Push + trim pipelined so the bound is enforced atomically.
            with self.client.pipeline() as pipe:
                pipe.lpush(key, pickled_val)
                pipe.ltrim(key, 0, size - 1)
                pipe.execute()
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache PREPEND: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def append(self, key, value, size=1000):
        """Push *value* onto the tail of the list, trimming it to *size*."""
        assert key
        assert size > 1
        logger.debug('Appending "%s" to %s', value, key)
        key = self.prefix + key
        pickled_val = pickle.dumps(value)
        try:
            with self.client.pipeline() as pipe:
                pipe.rpush(key, pickled_val)
                pipe.ltrim(key, 0, size - 1)
                pipe.execute()
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache APPEND: %s",
                ex.message,
                exc_info=True,
            )

    @concurrent.run_on_executor
    def remove(self, *keys_in):
        """Delete the given (unprefixed) keys."""
        if not keys_in:
            return None
        logger.debug("Deleting keys %s", keys_in)
        # add prefix
        keys = [self.prefix + key for key in keys_in]
        try:
            with self.client.pipeline() as pipe:
                pipe.delete(*keys)
                pipe.execute()
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache DELETE: %s",
                ex.message,
                exc_info=True,
            )

    @concurrent.run_on_executor
    def flush(self, pattern=None):
        """Flush all keys matching *pattern* (prefix is added here), deleting
        in groups of FLUSH_STEP keys for efficiency."""
        if not pattern:
            return
        logger.debug('Flushing pattern "%s"', pattern)
        try:
            keys = self.client.keys(self.prefix + pattern)
            for i in xrange(0, len(keys), self.FLUSH_STEP):
                keys_to_flush = keys[i : i + self.FLUSH_STEP]
                logger.debug("Flushing cache keys %s", keys_to_flush)
                self.client.delete(*keys_to_flush)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache FLUSH: %s",
                ex.message,
                exc_info=True,
            )

    @concurrent.run_on_executor
    def flush_all(self):
        """Flush every key in this cache's namespace."""
        logger.debug("FLUSH ALL")
        # flush() prepends self.prefix itself; passing the prefixed pattern
        # here (as before) produced a doubly-prefixed pattern that matched
        # nothing, so flush_all silently deleted zero keys.
        return self.flush("*")

    # ------------------------------------------------------------------
    # Hashing functionality
    # ------------------------------------------------------------------

    @gen.coroutine
    def set_hash(self, key, val):
        """Set all fields of the hash at *key* from the dict *val*."""
        key = self.prefix + key
        try:
            self.client.hmset(key, val)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache HASH SET: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def delete_hash_key(self, r_hash, *keys):
        """Delete the given fields from the hash *r_hash*."""
        r_hash = self.prefix + r_hash
        try:
            return self.client.hdel(r_hash, *keys)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache HASH DEL: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def get_hash(self, key, hash_key):
        """Return the value of field *hash_key* in the hash at *key*."""
        key = self.prefix + key
        try:
            return self.client.hget(key, hash_key)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache HASH GET: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def get_all_hashes(self, key):
        """Return every field/value pair of the hash at *key*."""
        key = self.prefix + key
        try:
            return self.client.hgetall(key)
        except RedisError as ex:
            # Fixed: the keyword is exc_info; exec_info raised TypeError
            # inside this error handler.
            logger.critical(
                "[EXCEPTION] Error on cache GET ALL HASHES: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def get_hash_size(self, key):
        """Return the number of fields in the hash at *key*."""
        key = self.prefix + key
        try:
            return self.client.hlen(key)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache GET HASH LENGTH: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def get_keys(self, pattern):
        """Return raw keys matching *pattern* (no prefix is added here)."""
        try:
            return self.client.keys(pattern)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache GET KEYS: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def trim_array(self, key, start, end):
        """Trim the list at *key* to the inclusive [start, end] range."""
        key = self.prefix + key
        try:
            return self.client.ltrim(key, start, end)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache TRIM ARRAY: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def set_zset(self, key, **sets):
        """Add member=score pairs to the sorted set at *key*."""
        key = self.prefix + key
        try:
            return self.client.zadd(key, **sets)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache SET ZSET: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def get_zrangebyscore(
        self, key, min_score, max_score, start=None, num=None, withscores=False
    ):
        """Return sorted-set members with scores in [min_score, max_score]."""
        key = self.prefix + key
        try:
            return self.client.zrangebyscore(
                key,
                min_score,
                max_score,
                start=start,
                num=num,
                withscores=withscores,
            )
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache GET ZRANGEBYSCORE: %s",
                ex.message,
                exc_info=True,
            )

    @gen.coroutine
    def del_zrangebyscore(self, key, min_score, max_score):
        """Remove sorted-set members with scores in [min_score, max_score]."""
        key = self.prefix + key
        try:
            return self.client.zremrangebyscore(key, min_score, max_score)
        except RedisError as ex:
            logger.critical(
                "[EXCEPTION] Error on cache ZREMRANGEBYSCORE: %s",
                ex.message,
                exc_info=True,
            )
class MemoryCache(AbstractCache):
"""
A very simple implementation of memory-based caching,
to test the interface and decorators.
"""
    def __init__(self):
        # key -> {"ttl": ..., "created": ..., "val": ...} entries (sets are
        # stored bare by unique_add); hit/miss counters for statistics.
        self._cache = dict()
        self.hits = 0
        self.misses = 0
    @gen.coroutine
    def set(self, key, value, expire=None):
        """Store *value* under *key*, recording creation time and TTL."""
        logger.debug('Setting cache key "%s" with TTL %s', key, expire)
        # NOTE(review): the TTL is only recorded here; no expiry enforcement
        # is visible in this method.
        self._cache[key] = {"ttl": expire, "created": time(), "val": value}
@gen.coroutine
def unique_add(self, set_name, value):
logger.debug('Adding "%s" to set "%s"', value, set_name)
if set_name not in self._cache:
self._cache[set_name] = set()
self._cache[set_name].add(value)
| |
hue values from HSV NumPy array as a 1-dimensional array. If output as an int array, the original float
values are multiplied by 360 for their degree equivalents for simplicity. For more information, see
https://en.wikipedia.org/wiki/HSL_and_HSV
Args:
hsv: HSV image as a NumPy array.
output_type: Type of array to return (float or int).
display_np_info: If True, display NumPy array info and filter time.
Returns:
Hue values (float or int) as a 1-dimensional NumPy array.
"""
h = hsv[:, :, 0]
h = h.flatten()
h *= 360
h = h.astype(np.uint8)
return h
def filter_hsv_to_s(hsv):
    """
    Experimental HSV to S (saturation).

    Args:
        hsv: HSV image as a NumPy array.

    Returns:
        Saturation values as a 1-dimensional NumPy array.
    """
    # Channel 1 of HSV is saturation; flatten() returns a 1-D copy.
    return hsv[:, :, 1].flatten()
def filter_hsv_to_v(hsv):
    """
    Experimental HSV to V (value).

    Args:
        hsv: HSV image as a NumPy array.

    Returns:
        Value values as a 1-dimensional NumPy array.
    """
    # Channel 2 of HSV is value; flatten() returns a 1-D copy.
    return hsv[:, :, 2].flatten()
def filter_hed_to_hematoxylin(np_img):
    """
    Obtain the Hematoxylin channel from an HED NumPy array, rescaled to
    the full 0-255 uint8 range for increased contrast.

    Args:
        np_img: HED image as a NumPy array.

    Returns:
        uint8 NumPy array for the Hematoxylin channel.
    """
    channel = np_img[:, :, 0]
    rescaled = sk_exposure.rescale_intensity(channel, out_range=(0, 255))
    return rescaled.astype(np.uint8)
def filter_hed_to_eosin(np_img):
    """
    Obtain the Eosin channel from an HED NumPy array, rescaled to the full
    0-255 uint8 range for increased contrast.

    Args:
        np_img: HED image as a NumPy array.

    Returns:
        uint8 NumPy array for the Eosin channel.
    """
    channel = np_img[:, :, 1]
    rescaled = sk_exposure.rescale_intensity(channel, out_range=(0, 255))
    return rescaled.astype(np.uint8)
def filter_binary_erosion(np_img, disk_size=5, iterations=1, output_type="bool"):
    """
    Erode a binary object (bool, float, or uint8).

    Args:
        np_img: Binary image as a NumPy array.
        disk_size: Radius of the disk structuring element used for erosion.
        iterations: How many times to repeat the erosion.
        output_type: Type of array to return (bool, float, or uint8).

    Returns:
        NumPy array (bool, float, or uint8) where edges have been eroded.
    """
    # Normalize uint8 (0/255) input to 0/1 before the boolean morphology op.
    mask = np_img / 255 if np_img.dtype == "uint8" else np_img
    eroded = sc_morph.binary_erosion(
        mask, sk_morphology.disk(disk_size), iterations=iterations
    )
    if output_type == "bool":
        return eroded
    if output_type == "float":
        return eroded.astype(float)
    return eroded.astype("uint8") * 255
def filter_binary_dilation(np_img, disk_size=5, iterations=1, output_type="bool"):
    """
    Dilate a binary object (bool, float, or uint8).

    Args:
        np_img: Binary image as a NumPy array.
        disk_size: Radius of the disk structuring element used for dilation.
        iterations: How many times to repeat the dilation.
        output_type: Type of array to return (bool, float, or uint8).

    Returns:
        NumPy array (bool, float, or uint8) where edges have been dilated.
    """
    # Normalize uint8 (0/255) input to 0/1 before the boolean morphology op.
    mask = np_img / 255 if np_img.dtype == "uint8" else np_img
    dilated = sc_morph.binary_dilation(
        mask, sk_morphology.disk(disk_size), iterations=iterations
    )
    if output_type == "bool":
        return dilated
    if output_type == "float":
        return dilated.astype(float)
    return dilated.astype("uint8") * 255
def filter_threshold(np_img, threshold):
    """
    Return a uint8 mask that is 255 where a pixel exceeds *threshold*, else 0.

    Args:
        np_img: Image as a NumPy array.
        threshold: The threshold value to exceed.

    Returns:
        uint8 NumPy array with 255 where the corresponding input pixel
        exceeds the threshold value and 0 elsewhere.
    """
    return (np_img > threshold).astype(np.uint8) * 255
def uint8_to_bool(np_img):
    """
    Convert a NumPy array of uint8 (255,0) values to bool (True,False) values.

    Args:
        np_img: Binary image as NumPy array of uint8 (255,0) values.

    Returns:
        NumPy array of bool (True,False) values.
    """
    return (np_img / 255).astype(bool)
def mask_rgb(rgb, mask):
    """
    Apply a binary (T/F, 1/0) mask to a 3-channel RGB image.

    Args:
        rgb: RGB image as a NumPy array.
        mask: Image mask selecting which pixels of the original to keep.

    Returns:
        NumPy array representing the RGB image with the mask applied
        (masked-out pixels become 0 in all three channels).
    """
    # Replicate the 2-D mask across the three color channels, then multiply.
    return rgb * np.dstack([mask] * 3)
def mask_percent(np_img):
    """
    Determine the percentage of a NumPy array that is masked (zero-valued).

    Args:
        np_img: Image as a NumPy array (2-D mask or 3-channel RGB).

    Returns:
        The percentage of the array that is masked.
    """
    if np_img.ndim == 3 and np_img.shape[2] == 3:
        # RGB: a pixel counts as masked only when all three channels are 0.
        np_sum = np_img[:, :, 0] + np_img[:, :, 1] + np_img[:, :, 2]
        return 100 - np.count_nonzero(np_sum) / np_sum.size * 100
    return 100 - np.count_nonzero(np_img) / np_img.size * 100
def filter_green_channel(
    np_img,
    green_thresh=200,
    avoid_overmask=True,
    overmask_thresh=90,
    output_type="bool",
):
    """
    Create a mask to filter out pixels with a green channel value greater than a particular
    threshold, since hematoxylin and eosin are purplish and pinkish, which do not have much
    green to them.

    Args:
        np_img: RGB image as a NumPy array.
        green_thresh: Green channel threshold value (0 to 255). If value is greater
            than green_thresh, mask out pixel.
        avoid_overmask: If True, avoid masking above the overmask_thresh percentage.
        overmask_thresh: If avoid_overmask is True, avoid masking above this
            threshold percentage value.
        output_type: Type of array to return (bool, float, or uint8).

    Returns:
        NumPy array representing a mask where pixels above a particular green channel
        threshold have been masked out.
    """
    g = np_img[:, :, 1]
    gr_ch_mask = (g < green_thresh) & (g > 0)
    mask_percentage = mask_percent(gr_ch_mask)
    if (
        (mask_percentage >= overmask_thresh)
        and (green_thresh < 255)
        and (avoid_overmask is True)
    ):
        new_green_thresh = math.ceil((255 - green_thresh) / 2 + green_thresh)
        # Too much was masked — retry with a looser threshold. Return the
        # recursive result directly: it is already converted to output_type,
        # and converting it a second time corrupted uint8 output
        # (255 * 255 wraps to 1 in uint8 arithmetic).
        return filter_green_channel(
            np_img, new_green_thresh, avoid_overmask, overmask_thresh, output_type
        )
    if output_type == "bool":
        return gr_ch_mask
    if output_type == "float":
        return gr_ch_mask.astype(float)
    return gr_ch_mask.astype("uint8") * 255
def filter_remove_small_objects(
    np_img, min_size=3000, avoid_overmask=True, overmask_thresh=95, output_type="uint8"
):
    """
    Filter image to remove small objects (connected components) less than a particular
    minimum size. If avoid_overmask is True, this function can recursively call itself
    with progressively smaller minimum size objects to remove, to reduce the amount of
    masking that this filter performs.

    Args:
        np_img: Image as a NumPy array of type bool.
        min_size: Minimum size of small object to remove.
        avoid_overmask: If True, avoid masking above the overmask_thresh percentage.
        overmask_thresh: If avoid_overmask is True, avoid masking above this
            threshold percentage value.
        output_type: Type of array to return (bool, float, or uint8).

    Returns:
        NumPy array (bool, float, or uint8).
    """
    rem_sm = np_img.astype(bool)  # make sure mask is boolean
    rem_sm = sk_morphology.remove_small_objects(rem_sm, min_size=min_size)
    mask_percentage = mask_percent(rem_sm)
    if (
        (mask_percentage >= overmask_thresh)
        and (min_size >= 1)
        and (avoid_overmask is True)
    ):
        new_min_size = min_size / 2
        # Too much was masked — retry with a smaller minimum size. Return the
        # recursive result directly: it is already converted to output_type,
        # and converting it again corrupted the default uint8 output
        # (255 * 255 wraps to 1 in uint8 arithmetic).
        return filter_remove_small_objects(
            np_img, new_min_size, avoid_overmask, overmask_thresh, output_type
        )
    if output_type == "bool":
        return rem_sm
    if output_type == "float":
        return rem_sm.astype(float)
    return rem_sm.astype("uint8") * 255
def filter_grays(rgb, tolerance=15, output_type="bool"):
"""
Create a mask to filter out pixels where the red, green, and blue channel values are similar.
Args:
np_img: RGB image as a NumPy array.
tolerance: Tolerance value to determine how similar the values must be in order to be filtered out
output_type: Type of array to return (bool, float, or uint8).
Returns:
NumPy array representing a mask where pixels with similar red, green, and blue values have been masked out.
"""
rgb = rgb.astype(np.int)
rg_diff = abs(rgb[:, :, 0] - rgb[:, :, 1]) <= tolerance
rb_diff = abs(rgb[:, :, 0] - rgb[:, :, 2]) <= tolerance
gb_diff = abs(rgb[:, :, 1] - rgb[:, :, 2]) <= tolerance
result = ~(rg_diff & rb_diff & gb_diff)
if output_type == "bool":
pass
elif output_type == "float":
result = result.astype(float)
else:
result = | |
from __future__ import print_function
import httplib2
import os
import json
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import pandas as pd
from prospecting.env import (PROJECTNAME,
CREDSDIR,
CLIENT_SECRET_FILE,
DATADIR,
NOAUTH_LOCAL_WEBSERVER
)
# Build oauth2client's standard command-line flags (consumed later by
# tools.run_flow); fall back to flags=None when argparse is unavailable.
try:
    import argparse
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    parser.set_defaults(noauth_local_webserver=NOAUTH_LOCAL_WEBSERVER)
    # parse_known_args ignores unrelated CLI arguments instead of erroring out.
    flags = parser.parse_known_args()[0]
except ImportError:
    flags = None
import logging
log = logging.getLogger('prospecting.api')
class GoogleApi:
    """Base class for interfacing with Google APIs
    https://developers.google.com/apis-explorer/
    """
    def __init__(self, apiname, apiversion, scopelist):
        """Initialize GoogleApi base class
        Args:
            apiname (str): Name of Google API, example: 'sheets'
            apiversion (str): Version of API, example: 'v4'
            scopelist (list): List of authorization scopes, example: []
        """
        self.api_name = apiname
        self.api_version = apiversion
        self.api_id = (self.api_name + ":" + self.api_version)
        self.api_scope = scopelist
        self.discovery_url = ('https://www.googleapis.com/discovery/v1/apis/' + self.api_name +
                              '/' + self.api_version + '/rest')
        # Fetch (or load the cached copy of) the API discovery document.
        self.api_info = self._discover_api(self.discovery_url)

    def authenticate(self):
        """Obtain credentials and return an authorized service object."""
        log.info('Authenticating...{0}, {1}'.format(self.api_name, self.api_version))
        self.credentials = self._get_credentials(self.api_scope)
        self.http = self.credentials.authorize(httplib2.Http())
        service = self._build_service_object()
        log.info('Successfully authenticated...{0}, {1}'.format(self.api_name, self.api_version))
        return service

    def reauthenticate(self, scopelist):
        """Discard stored credentials and authenticate again with new scopes."""
        if os.path.isfile(self.credential_path):
            os.remove(self.credential_path)
        self.api_scope = scopelist
        self.authenticate()

    def _get_credentials(self, scopelist):
        """Load stored credentials; run the OAuth flow when they are missing,
        invalid, or stored for a different scope set than *scopelist*."""
        log.info('Getting credentials...')
        credsfile = ('googleapis.' + self.api_name + '.' + PROJECTNAME + '.json')
        self.credential_path = os.path.join(CREDSDIR, credsfile)
        self.store = Storage(self.credential_path)
        file_exists = os.path.isfile(self.credential_path)
        scopes_match = False
        if file_exists:
            with open(self.credential_path) as f:
                credjson = json.load(f)
            scopes_match = set(credjson['scopes']) == set(scopelist)
        if scopes_match:
            creds = self.store.get()
        else:
            creds = None
        if (not creds or creds.invalid):
            creds = self._run_credentials_flow()
        return creds

    def _run_credentials_flow(self):
        """Run the OAuth2 installed-app flow and persist the credentials."""
        log.info('Running credentials flow...')
        secretspath = os.path.join(CREDSDIR, CLIENT_SECRET_FILE)
        flow = client.flow_from_clientsecrets(secretspath, self.api_scope)
        flow.user_agent = PROJECTNAME
        if flags or flags is None:
            self.credentials = tools.run_flow(flow, self.store, flags)
        else:  # Needed only for compatibility with Python 2.6
            # Fix: pass the local `flow`; the previous `self.flow` attribute
            # was never assigned and raised AttributeError on this path.
            self.credentials = tools.run(flow, self.store)
        log.info('Storing credentials to {0}'.format(self.credential_path))
        return self.credentials

    def _build_service_object(self):
        """Build the discovery-based service object for this API."""
        log.info('Building service object...')
        service_object = discovery.build(self.api_name,
                                         self.api_version,
                                         http=self.http,
                                         discoveryServiceUrl=self.discovery_url)
        log.info('Service object built...{0}'.format(service_object))
        return service_object

    def _discover_api(self, discoveryurl):
        """Return the API discovery document, caching it under DATADIR.

        Falls back to the global discovery index when *discoveryurl* 404s,
        and raises when the rediscovered URL also 404s.
        """
        discovery_file = os.path.join(DATADIR,
                                      'discoveryapi_' + self.api_name + '.json')
        if os.path.isfile(discovery_file):
            log.info(('Reading discovery file for {0}').format(self.api_id))
            with open(discovery_file) as f:
                disco_info = json.load(f)
            return disco_info
        h = httplib2.Http()
        resp, content = h.request(discoveryurl, 'GET')
        log.info(('Resp from 1st discoveryurl attempt: {0}'.format(resp['status'])))
        if resp['status'] == '404':
            # Look this API up in the global discovery index and retry.
            DISCOVERY_URI = 'https://www.googleapis.com/discovery/v1/apis?preferred=true'
            resp2, content2 = h.request(DISCOVERY_URI, 'GET')
            disco_all = json.loads(content2.decode())
            disco_api = [apiinfo for apiinfo in disco_all['items'] for k, v in apiinfo.items() if v == self.api_id][0]
            self.discovery_url = disco_api['discoveryRestUrl']
            resp, content = h.request(self.discovery_url, 'GET')
            if resp['status'] == '404':
                # Fix: raise directly instead of raise/catch/log/assert (the
                # assert would be stripped under -O); stray debug print removed.
                log.error('Error response in 2nd discoveryurl attempt: {0}'.format(resp['status']))
                raise Exception(resp['status'])
            log.info(('Resp from 2nd discoveryurl attempt: {0}'.format(resp['status'])))
        # Decode once, cache to disk, and return the parsed document
        # (previously the payload was decoded three separate times).
        disco_info = json.loads(content.decode())
        log.info(('Writing discovery file for {0}').format(self.api_id))
        with open(discovery_file, 'w') as outfile:
            json.dump(disco_info, outfile)
        log.info('Read from api, write to file complete. Check new file in ' + discovery_file)
        return disco_info
class SheetsApi(GoogleApi):
    """Class for SheetsApi object
    https://developers.google.com/resources/api-libraries/documentation/sheets/v4/python/latest/
    https://developers.google.com/apis-explorer/#p/sheets/v4/
    """
    def __init__(self,
                 apiname='sheets',
                 apiversion='v4',
                 spreadsheetid=None,
                 sheetrange=None,
                 scopelist=None):
        """Create a Sheets API client (call authenticate() before use).

        Args:
            apiname (str): Google API name, default 'sheets'.
            apiversion (str): API version, default 'v4'.
            spreadsheetid (str): Default spreadsheet id used by requests.
            sheetrange (str): Default A1-notation range used by requests.
            scopelist (list): OAuth scopes; defaults to the read-only
                spreadsheets + drive scopes.
        """
        if scopelist is None:
            # Default kept out of the signature to avoid a mutable default arg.
            scopelist = ['https://www.googleapis.com/auth/spreadsheets.readonly',
                         'https://www.googleapis.com/auth/drive.readonly']
        self.spreadsheet_id = spreadsheetid
        self.sheet_range = sheetrange
        self.info = None    # reserved for metadata
        self.sheets = {}    # store data from get requests
        GoogleApi.__init__(self, apiname, apiversion, scopelist)

    def authenticate(self):
        """Authenticate and cache the service object on self.service."""
        self.service = GoogleApi.authenticate(self)

    def get_ss_info(self, sheetranges=None, includegriddata=False):
        """Returns the spreadsheet metadata for self.spreadsheet_id.

        Params:
            sheetranges (list): List of comma separated range names as strings,
                Ex: ['Sheet1', 'Sheet2!A1:B5']
            includegriddata (bool): True if grid data should be returned, Ex: True
        Returns:
            Spreadsheet resource dict (properties, sheets, spreadsheetUrl, ...).
        Raises:
            ValueError: when self.spreadsheet_id is not set.
        """
        spreadsheetid = self.spreadsheet_id
        if spreadsheetid is None:
            # Fix: previously only the sheetranges=None path validated this.
            raise ValueError('Please set self.spreadsheet_id')
        if sheetranges is None:
            response = self.service.spreadsheets().get(
                spreadsheetId=spreadsheetid,
                includeGridData=includegriddata
            ).execute()
            log.info('Spreadsheet loaded.')
            log.info('Sheets include: {0}'.format([sheet['properties']['title'] for sheet in response['sheets']]))
            return response
        response = self.service.spreadsheets().get(
            spreadsheetId=spreadsheetid,
            ranges=sheetranges,
            includeGridData=includegriddata
        ).execute()
        return response

    def get(self,
            sheetrange=None,
            asdataframe=True,
            headerrow=0,
            majordimension='ROWS',
            valuerenderoption='FORMATTED_VALUE',
            datetimerenderoption='SERIAL_NUMBER'):
        """Returns one range of values from a spreadsheet.

        Params:
            sheetrange (str): Name of range to get, Ex: 'Sheet1'
            asdataframe (bool): Return a DataFrame instead of raw rows, Ex: False
            headerrow (int): Row index holding the column header, Ex: 2
            majordimension (str): Major dimension for results, Ex: 'COLUMNS'
            valuerenderoption (str): How values are represented, Ex: 'UNFORMATTED_VALUE'
            datetimerenderoption (str): How dates/times/durations are represented,
                Ex: 'FORMATTED_STRING'
        Returns:
            pandas.DataFrame or list of rows; None when the range is empty.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
        """
        spreadsheetid = self.spreadsheet_id
        if spreadsheetid is None:
            raise ValueError('Please set self.spreadsheet_id')
        if not sheetrange:
            sheetrange = self.sheet_range
        self.response = self.service.spreadsheets().values().get(
            spreadsheetId=spreadsheetid,
            range=sheetrange,
            majorDimension=majordimension,
            valueRenderOption=valuerenderoption,
            dateTimeRenderOption=datetimerenderoption
        ).execute()
        values = self.response.get('values', None)
        if not values:
            log.info('No data found.')
            return None
        if headerrow is None:
            # No header: return every row, optionally as a DataFrame.
            return pd.DataFrame.from_records(values) if asdataframe is True else list(values)
        if asdataframe is True:
            tmpdf = pd.DataFrame()
            try:
                tmpdf = pd.DataFrame.from_records(values[(headerrow + 1):],
                                                  columns=values[headerrow])
            except AssertionError as err:
                print('AssertionError: {0}'.format(err))
                print('No columns in headerrow. Add columns to sheet or pass headerrow=None.')
                print('Check self.data for malformed response (no columns set).')
            return tmpdf
        # Fix: return all rows after the header row; this previously returned
        # only the single row at index headerrow + 1.
        return values[(headerrow + 1):]

    def batchGet(self,
                 sheetranges,
                 majordimension='ROWS',
                 valuerenderoption='FORMATTED_VALUE',
                 datetimerenderoption='SERIAL_NUMBER'):
        """Returns one or more ranges of values from a spreadsheet.

        Params:
            sheetranges (list): List of comma separated range names as strings,
                Ex: ['Sheet1', 'Sheet2!A1:B5']
            majordimension (str): Major dimension for results, Ex: 'COLUMNS'
            valuerenderoption (str): How values are represented, Ex: 'UNFORMATTED_VALUE'
            datetimerenderoption (str): How dates/times/durations are represented,
                Ex: 'FORMATTED_STRING'
        Returns:
            dict mapping returned range name -> list of row values.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
        """
        spreadsheetid = self.spreadsheet_id
        if spreadsheetid is None:
            raise ValueError('Please set self.spreadsheet_id')
        if not sheetranges:
            sheetranges = self.sheet_range
        self.response = self.service.spreadsheets().values().batchGet(
            spreadsheetId=spreadsheetid,
            ranges=sheetranges,
            majorDimension=majordimension,
            valueRenderOption=valuerenderoption,
            dateTimeRenderOption=datetimerenderoption
        ).execute()
        values = {vr['range']: vr.get('values', []) for vr in self.response['valueRanges']}
        if not values:
            print('No data found.')
        # Return the mapping directly (the extra dict copy was redundant).
        return values

    def update(self,
               dataframe,
               sheetrange,
               majordimension='ROWS',
               valueinputoption='RAW',
               includevaluesinresponse=False,
               responsevaluerenderoption='FORMATTED_VALUE',
               responsedatetimerenderoption='SERIAL_NUMBER'):
        """Sets values in a range of a spreadsheet (header row + data rows).

        Params:
            dataframe (pandas.DataFrame): Data to write; column names become
                the first written row.
            sheetrange (str): Target range, Ex: 'Sheet1'
            majordimension (str): Major dimension of the values, Ex: 'COLUMNS'
            valueinputoption (str): How the input data should be interpreted,
                Ex: 'USER_ENTERED'
            includevaluesinresponse (bool): Currently not forwarded to the API.
            responsevaluerenderoption (str): Currently not forwarded to the API.
            responsedatetimerenderoption (str): Currently not forwarded to the API.
        Returns:
            None; the raw "UpdateValuesResponse" is stored on self.response.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
        """
        spreadsheetid = self.spreadsheet_id
        data = {
            "range": sheetrange,
            "majorDimension": majordimension,
            "values":
                [(dataframe.columns.values.tolist())] + (dataframe.values.tolist())
        }
        # NOTE(review): includevaluesinresponse and the response render options
        # are accepted but not passed through (as in the original); forwarding
        # them changes the response payload — confirm callers before enabling.
        self.response = self.service.spreadsheets().values().update(
            spreadsheetId=spreadsheetid,
            range=sheetrange,
            valueInputOption=valueinputoption,
            body=data
        ).execute()
        if not self.response:
            log.info('Update Failed!')
        else:
            log.info('Update Successful!')

    def batchUpdate(self,):
        # Not implemented yet.
        pass

    def clear(self):
        # Not implemented yet.
        pass

    def batchClear(self):
        # Not implemented yet.
        pass

    def append(self,
               dataframe,
               sheetrange,
               majordimension='ROWS',
               valueinputoption='RAW',
               insertdataoption='INSERT_ROWS',
               includevaluesinresponse=False,
               responsevaluerenderoption='FORMATTED_VALUE',
               responsedatetimerenderoption='SERIAL_NUMBER'):
        """Append values to a spreadsheet.

        Params:
            dataframe (pandas.DataFrame): Rows to append (column names are not
                written).
            sheetrange (str): The A1 notation of a range to search for a logical
                table of data; values are appended after the last row, Ex: 'Sheet1'
            valueinputoption (str): How the input data should be interpreted,
                Ex: 'USER_ENTERED'
            insertdataoption (str): Currently not forwarded to the API.
            includevaluesinresponse (bool): Currently not forwarded to the API.
            responsevaluerenderoption (str): Currently not forwarded to the API.
            responsedatetimerenderoption (str): Currently not forwarded to the API.
        Returns:
            None; the raw append response is stored on self.response.
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append
        """
        spreadsheetid = self.spreadsheet_id
        data = {
            "range": sheetrange,
            "majorDimension": majordimension,
            "values": dataframe.values.tolist()
        }
        self.response = self.service.spreadsheets().values().append(
            spreadsheetId=spreadsheetid,
            range=sheetrange,
            # Fix: honor the caller's valueinputoption; this was hard-coded
            # to 'RAW', silently ignoring the parameter.
            valueInputOption=valueinputoption,
            # NOTE(review): insertdataoption is still not forwarded —
            # forwarding insertDataOption would change the API-side default
            # behavior for existing callers; confirm before enabling.
            body=data
        ).execute()
        if not self.response:
            log.info('No data found.')
        else:
            log.info('Append Successful!')

    def extract_sheet_names(self):
        # Not implemented yet.
        pass

    def load_sheets(self, sheetslist, batch=None):
        """Load each named sheet into a DataFrame keyed by sheet name.

        Args:
            sheetslist (list): Sheet names to load.
            batch (dict): Optional pre-fetched batchGet result; fetched when
                omitted.
        Returns:
            dict mapping sheet name -> DataFrame (or the raw rows when
            DataFrame construction fails).
        """
        data = {}
        if batch is None:
            batch = self.batchGet(sheetslist)
        for s in sheetslist:
            tmp = [value for key, value in batch.items() if s in key][0]
            if tmp is None:
                data[s] = tmp
            else:
                try:
                    # NOTE(review): the slicing below is bounded by len(tmp[1])
                    # (the first data row) — presumably to drop trailing
                    # unnamed columns; confirm the intent.
                    data[s] = pd.DataFrame.from_records(tmp[1:len(tmp[1])],
                                                        columns=tmp[0][0:len(tmp[1])])
                except Exception as err:
                    # Narrowed from a bare except (which also swallowed
                    # KeyboardInterrupt/SystemExit) and now logs the cause.
                    log.warning('Failed to load dataframe (%s), returning tmp', err)
                    data[s] = tmp
        return data
class DriveApi(GoogleApi):
"""Class for DriveApi | |
"""Bounding boxes transformation functions."""
from __future__ import division
import copy
import numpy as np
import random
from utils.bbox import bbox_iou
__all__ = ['random_crop_with_constraints', 'crop', 'flip', 'resize', 'translate']
def random_crop_with_constraints(bboxs, size, min_scale=0.3, max_scale=1,
                                 max_aspect_ratio=2, constraints=None,
                                 max_trial=50):
    """Crop an image randomly with bounding box constraints.

    This data augmentation is used in training of Single Shot Multibox
    Detector [#]_. More details can be found in the data augmentation
    section of the original paper.

    .. [#] SSD: Single Shot MultiBox Detector. ECCV 2016.

    Parameters
    ----------
    bboxs : numpy.ndarray or list
        Numpy.ndarray with shape (N, 4+), or a list of such arrays (one per
        timestep). The second axis holds :math:`(x_{min}, y_{min}, x_{max},
        y_{max})`; additional attributes stay intact during transformation.
    size : tuple
        Tuple of length 2 of image shape as (width, height).
    min_scale : float
        The minimum ratio between a cropped region and the original image.
        The default value is :obj:`0.3`.
    max_scale : float
        The maximum ratio between a cropped region and the original image.
        The default value is :obj:`1`.
    max_aspect_ratio : float
        The maximum aspect ratio of cropped region. The default value is :obj:`2`.
    constraints : iterable of tuples
        An iterable of :obj:`(min_iou, max_iou)` constraints; :obj:`None` in
        either slot means unconstrained. Defaults to ((0.1, None), (0.3, None),
        (0.5, None), (0.7, None), (0.9, None), (None, 1)).
    max_trial : int
        Maximum number of trials for each constraint before exit no matter what.

    Returns
    -------
    numpy.ndarray or list
        Cropped bounding boxes, same structure as the input.
    tuple
        Tuple of length 4 as (x_offset, y_offset, new_width, new_height).
    """
    # default params in paper
    if constraints is None:
        constraints = (
            (0.1, None),
            (0.3, None),
            (0.5, None),
            (0.7, None),
            (0.9, None),
            (None, 1),
        )
    w, h = size
    td = False
    if not isinstance(bboxs, list):
        bboxs = [bboxs]
        td = True
    candidates = [(0, 0, w, h)]
    for min_iou, max_iou in constraints:
        min_iou = -np.inf if min_iou is None else min_iou
        max_iou = np.inf if max_iou is None else max_iou
        for _ in range(max_trial):
            scale = random.uniform(min_scale, max_scale)
            aspect_ratio = random.uniform(
                max(1 / max_aspect_ratio, scale * scale),
                min(max_aspect_ratio, 1 / (scale * scale)))
            crop_h = int(h * scale / np.sqrt(aspect_ratio))
            crop_w = int(w * scale * np.sqrt(aspect_ratio))
            # Fix: randrange(0) raises ValueError when the crop spans the
            # whole dimension; in that case the only valid offset is 0.
            crop_t = random.randrange(h - crop_h) if h > crop_h else 0
            crop_l = random.randrange(w - crop_w) if w > crop_w else 0
            crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))
            if all(len(bbox) == 0 for bbox in bboxs):
                # No boxes at any timestep: any random crop is acceptable.
                if td:
                    bboxs = bboxs[0]
                return bboxs, (crop_l, crop_t, crop_w, crop_h)
            cand = True
            for bbox in bboxs:
                iou = bbox_iou(bbox, crop_bb[np.newaxis])
                if min_iou > iou.min() or iou.max() > max_iou:
                    cand = False
            if cand:
                candidates.append((crop_l, crop_t, crop_w, crop_h))
                break
    # randomly select one valid candidate (the full image is always included)
    while candidates:
        crop_b = candidates.pop(np.random.randint(0, len(candidates)))
        new_bboxs = crop(bboxs, crop_b, allow_outside_center=False)
        # Fix: the original `continue` only skipped an inner for-loop
        # iteration, so crops that dropped every box were returned anyway;
        # skip such candidates and try the next one instead.
        if any(np.asarray(new_bbox).size < 1 for new_bbox in new_bboxs):
            continue
        # Fix: unwrap single-array input like every other path does.
        if td:
            new_bboxs = new_bboxs[0]
        return new_bboxs, (crop_b[0], crop_b[1], crop_b[2], crop_b[3])
    if td:
        bboxs = bboxs[0]
    return bboxs, (0, 0, w, h)
def crop(bboxs, crop_box=None, allow_outside_center=True):
    """Crop bounding boxes according to slice area.

    This method is mainly used with image cropping to ensure bonding boxes fit
    within the cropped image.

    Parameters
    ----------
    bboxs : numpy.ndarray or list
        Numpy.ndarray with shape (N, 4+), or a list of such arrays (one per
        timestep). The second axis holds :math:`(x_{min}, y_{min}, x_{max},
        y_{max})`; additional attributes stay intact during transformation.
    crop_box : tuple
        Tuple of length 4. :math:`(x_{min}, y_{min}, width, height)`
    allow_outside_center : bool
        If `False`, remove bounding boxes which have centers outside cropping area.

    Returns
    -------
    numpy.ndarray or list
        Cropped bounding boxes with shape (M, 4+) where M <= N, same
        structure (single array or list) as the input.
    """
    td = False
    if not isinstance(bboxs, list):
        bboxs = [bboxs]
        td = True
    bboxs = copy.deepcopy(bboxs)
    for i, bbox in enumerate(bboxs):
        if crop_box is None:
            break
        if not len(crop_box) == 4:
            raise ValueError(
                "Invalid crop_box parameter, requires length 4, given {}".format(str(crop_box)))
        if sum([int(c is None) for c in crop_box]) == 4:
            break
        l, t, w, h = crop_box
        left = l if l else 0
        top = t if t else 0
        right = left + (w if w else np.inf)
        bottom = top + (h if h else np.inf)
        crop_bbox = np.array((left, top, right, bottom))
        if allow_outside_center:
            mask = np.ones(bbox.shape[0], dtype=bool)
        else:
            centers = (bbox[:, :2] + bbox[:, 2:4]) / 2
            mask = np.logical_and(crop_bbox[:2] <= centers, centers < crop_bbox[2:]).all(axis=1)
        # transform borders: clip to the crop window, then shift to its origin
        bbox[:, :2] = np.maximum(bbox[:, :2], crop_bbox[:2])
        bbox[:, 2:4] = np.minimum(bbox[:, 2:4], crop_bbox[2:4])
        bbox[:, :2] -= crop_bbox[:2]
        bbox[:, 2:4] -= crop_bbox[:2]
        mask = np.logical_and(mask, (bbox[:, :2] < bbox[:, 2:4]).all(axis=1))
        # Fix: store the filtered array back into the list; previously the
        # masked result was bound to the loop variable and discarded, so
        # degenerate / outside boxes were never actually removed.
        bboxs[i] = bbox[mask]
    if td:
        bboxs = bboxs[0]
    return bboxs
def flip(bboxs, size, flip_x=False, flip_y=False):
    """Mirror bounding boxes to match horizontal/vertical image flips.

    Parameters
    ----------
    bboxs : numpy.ndarray or list
        Array of shape (N, 4+) or a list of such arrays (one per timestep);
        columns 0-3 are (x_min, y_min, x_max, y_max) and any extra columns
        are left untouched.
    size : tuple
        Tuple of length 2: (width, height).
    flip_x : bool
        Whether flip horizontally.
    flip_y : bool
        Whether flip vertically.

    Returns
    -------
    numpy.ndarray or list
        Flipped copies with the same structure as the input.
    """
    if len(size) != 2:
        raise ValueError("size requires length 2 tuple, given {}".format(len(size)))
    width, height = size
    wrapped = not isinstance(bboxs, list)
    boxes_list = [bboxs] if wrapped else bboxs
    boxes_list = copy.deepcopy(boxes_list)
    for boxes in boxes_list:
        if flip_y:
            # new y_min/y_max come from mirroring the old y_max/y_min
            boxes[:, 1], boxes[:, 3] = height - boxes[:, 3], height - boxes[:, 1]
        if flip_x:
            boxes[:, 0], boxes[:, 2] = width - boxes[:, 2], width - boxes[:, 0]
    return boxes_list[0] if wrapped else boxes_list
def resize(bboxs, in_size, out_size):
    """Scale bounding boxes to follow an image resize.

    Parameters
    ----------
    bboxs : numpy.ndarray or list
        Array of shape (N, 4+) or a list of such arrays (one per timestep);
        columns 0-3 are (x_min, y_min, x_max, y_max) and any extra columns
        are left untouched.
    in_size : tuple
        Tuple of length 2: (width, height) for input.
    out_size : tuple
        Tuple of length 2: (width, height) for output.

    Returns
    -------
    numpy.ndarray or list
        Rescaled copies with the same structure as the input.
    """
    for name, value in (("in_size", in_size), ("out_size", out_size)):
        if len(value) != 2:
            raise ValueError("{} requires length 2 tuple, given {}".format(name, len(value)))
    wrapped = not isinstance(bboxs, list)
    boxes_list = [bboxs] if wrapped else bboxs
    boxes_list = copy.deepcopy(boxes_list)
    x_scale = out_size[0] / in_size[0]
    y_scale = out_size[1] / in_size[1]
    factors = np.array([x_scale, y_scale, x_scale, y_scale])
    for boxes in boxes_list:
        boxes[:, :4] = boxes[:, :4] * factors
    return boxes_list[0] if wrapped else boxes_list
def translate(bboxs, x_offset=0, y_offset=0):
"""Translate bounding boxes by offsets.
Parameters
----------
bboxs : numpy.ndarray or list
Numpy.ndarray with shape (N, 4+) list of len T, where N is the number of bounding boxes and T is num timesteps.
The last axis represents attributes of the bounding box.
Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`,
we allow additional attributes other than coordinates, | |
from functools import reduce
import numpy as np
from sparse import SparseTensor
def reduce_prod(seq):
    """Left-fold multiplication of every element in *seq*."""
    return reduce(lambda acc, value: acc * value, seq)
class Polynomial:
    """Sparse multivariate polynomial.

    Represented as ``sum_i coeff[i] * prod_j x_j ** indices[i, j]``: ``coeff``
    holds one scalar per monomial and ``indices`` one row of per-variable
    exponents per monomial.
    """
    def __init__(self, coeff, indices, merge=True):
        """\\sum_{i=0}^{N-1} coeff[i] \\Pi_{j=0}^{NV-1} x_j^{indices[i, j]}

        Args:
            coeff: 1-D array of monomial coefficients, shape [N].
            indices: 2-D integer array of exponents, shape [N, NV].
            merge: When True, collapse duplicate exponent rows via
                SparseTensor.merge; pass False when rows are known unique.
        """
        self.degree = np.max(np.sum(indices, axis=-1))
        self.n_elements = indices.shape[-1]
        if merge:
            self.coeff, self.indices = SparseTensor.merge(coeff, indices)
        else:
            self.coeff, self.indices = coeff, indices

    def __call__(self, x):
        # Evaluate at a batch of points: x is reshaped to [B, 1, NV] and
        # broadcast against indices [1, N, NV]; the result has shape [B].
        coeff = np.reshape(self.coeff, newshape=(1, -1))
        x = np.reshape(x, newshape=(-1, 1, self.n_elements))
        indices = np.reshape(self.indices, newshape=(1, -1, self.n_elements))
        return np.sum(coeff * np.prod(np.power(x, indices), axis=2), axis=1)

    def __str__(self): return '\n'.join(["{:.2f}\t{}".format(c, index) for c, index in zip(self.coeff, self.indices)])

    def __neg__(self): return Polynomial(-self.coeff, self.indices, merge=False)

    def __add__(self, other):
        # Scalars are lifted to constant polynomials, then re-dispatched.
        if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
            other = Polynomial(np.array([other, ]), np.zeros(shape=(1, self.n_elements), dtype=self.indices.dtype), merge=False)
            return self.__add__(other)
        elif isinstance(other, Polynomial):
            assert self.n_elements == other.n_elements
            return Polynomial(np.hstack([self.coeff, other.coeff]), np.vstack([self.indices, other.indices]), merge=True)
        else:
            raise ValueError

    def __sub__(self, other): return self.__add__(other.__neg__())

    def __mul__(self, other):
        if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
            return Polynomial(self.coeff * other, self.indices, merge=False)
        elif isinstance(other, Polynomial):
            # Cartesian product of monomials: coefficients multiply,
            # exponents add; merge collapses the duplicates.
            assert self.n_elements == other.n_elements
            coeff = np.expand_dims(self.coeff, axis=0) * np.expand_dims(other.coeff, axis=1)
            coeff = coeff.flatten()
            indices = np.expand_dims(self.indices, axis=0) + np.expand_dims(other.indices, axis=1)
            indices = np.reshape(indices, newshape=(-1, self.n_elements))
            return Polynomial(coeff, indices, merge=True)
        else:
            raise ValueError

    def derivative(self, order=1):
        """
        +----------+-----------------+--------------+
        | item     | data type       | shape        |
        +----------+-----------------+--------------+
        | order    | int             | []           |
        | return   | PolynomialArray | [ND] * order |
        +----------+-----------------+--------------+
        """
        array = [self]
        for _ in range(order):
            collection = []
            for poly in array:
                for i in range(self.indices.shape[1]):
                    # d/dx_i of c * x_i^k is (c*k) * x_i^(k-1); exponents are
                    # clamped at 0 (the coefficient is 0 there anyway).
                    coeff = poly.coeff * poly.indices[:, i]
                    indices = np.maximum(poly.indices - np.eye(poly.n_elements, dtype=poly.indices.dtype)[[i], :], 0)
                    collection.append(Polynomial(coeff, indices, merge=True))
            array = collection
        return PolynomialArray(array, shape=[self.indices.shape[1]] * order)

    def directional_derivative(self, c, order=1):
        """
        +----------+---------------+--------------+
        | item     | data type     | shape        |
        +----------+---------------+--------------+
        | c        | numpy.ndarray | [ND] * order |
        | order    | int           | []           |
        | return   | Polynomial    |              |
        +----------+---------------+--------------+
        return: \\sum_{ij...} c_{ij...} \\frac{\\partial^ self}{\\partial \\lambda_i \\partial \\lambda_j ...}
        """
        coeff = self.coeff
        indices = self.indices
        dim = self.n_elements
        for axis in range(order):
            coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=[-1] + list(range(axis+1)))
            # Fix: np.int was removed in NumPy 1.24 (AttributeError); the
            # builtin int is the documented replacement.
            indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=int), axis=list(range(1, axis + 2)))
            indices = np.maximum(indices, 0)
        coeff = (np.expand_dims(c, axis=-1) * coeff).flatten()
        indices = np.reshape(indices, newshape=(-1, dim))
        return Polynomial(coeff, indices, merge=True)
class PolynomialArray:
def __init__(self, array, shape): self.array, self.shape = array, list(shape)
def reshape(self, shape):
shape = list(shape)
for axis in range(shape.__len__()):
if shape[axis] == -1:
shape[axis] = -reduce_prod(self.shape) // reduce_prod(shape)
break
return PolynomialArray(self.array, shape)
def transpose(self, axes):
transpose_indices = np.transpose(np.reshape(np.arange(self.array.__len__()), newshape=self.shape), axes=axes)
array = [self.array[index] for index in transpose_indices.flatten()]
shape = [self.shape[axis] for axis in axes]
return PolynomialArray(array, shape)
def sum(self, axis, keep_dim=False):
axes = [axis] + [ax for ax in range(self.shape.__len__()) if ax != axis]
transpose_array = self.transpose(axes)
result = reduce(lambda u, v: u + v, [transpose_array[k] for k in range(transpose_array.shape[0])])
if keep_dim:
result.shape.insert(axis, 1)
return result
def __call__(self, x): return np.reshape(np.stack([poly(x) for poly in self.array], axis=1), newshape=[-1] + self.shape)
def __getitem__(self, item):
valid_indices = np.reshape(np.arange(self.array.__len__()), newshape=self.shape)[item]
array = [self.array[index] for index in valid_indices.flatten()]
shape = valid_indices.shape
return array[0] if shape == () else PolynomialArray(array, shape)
def __eq__(self, other): return (self.shape == other.shape) and sum([sp != op for sp, op in zip(self.array, other.array)]) == 0
def __neg__(self): return PolynomialArray([-array for array in self.array], self.shape)
def __add__(self, other): # TODO: in large scale calculation, this operator works slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa + other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__add__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] + other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
def __sub__(self, other): return self.__add__(other.__neg__())
def __mul__(self, other): # TODO: in large scale calculation, this operator works slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa * other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__mul__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] * other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
@classmethod
def stack(cls, arrays, axis):
axis %= arrays[0].shape.__len__() + 1
array = sum([item.array for item in arrays], [])
shape = [arrays.__len__()] + list(arrays[0].shape)
axes = [i for i in range(shape.__len__()) if i != axis]
axes.insert(axis, 0)
return PolynomialArray(array, shape).transpose(axes)
@classmethod
def concat(cls, arrays, axis):
axes = [axis] + [i for i in range(arrays[0].shape.__len__()) if i != axis]
shape = [-1] + [dim for i, dim in enumerate(arrays[0].shape) if i != axis]
arrays = sum([cls.transpose(array, axes).array for array in arrays], [])
arrays = cls(arrays, shape=(arrays.__len__(), ))
arrays = arrays.reshape(shape)
axes = list(range(1, shape.__len__()))
axes.insert(axis, 0)
return arrays.transpose(axes)
def derivative(self, order=1):
"""
+----------+-----------------+---------------------------+
| item | data type | shape |
+----------+-----------------+---------------------------+
| order | int | [] |
| return | PolynomialArray | self.shape + [ND] * order |
+----------+-----------------+---------------------------+
"""
array = PolynomialArray.stack([poly.derivative(order) for poly in self.array], axis=0)
return array.reshape(self.shape + array.shape[1:])
def directional_derivative(self, c, order=1):
    """Contract the order-th derivative tensor with coefficients ``c``.

    +----------+-----------------+---------------------------+
    | item     | data type       | shape                     |
    +----------+-----------------+---------------------------+
    | c        | numpy.ndarray   | self.shape + [ND] * order |
    | order    | int             | []                        |
    | return   | PolynomialArray | self.shape                |
    +----------+-----------------+---------------------------+
    return: \\sum_{ij...} c_{ij...}^{uv...} \\frac{\\partial^ self_{uv...}}{\\partial \\lambda_i \\partial \\lambda_j ...}
    """
    # Pad every polynomial to a common number of terms so they can be
    # processed as one batch.
    ni = max(len(p.coeff) for p in self.array)
    dim = self.array[0].n_elements
    coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - len(p.coeff),))], axis=0) for p in self.array]
    coeff = np.stack(coeff, axis=1)  # shape = [NI, ?]
    # FIX: ``np.int`` was removed in NumPy 1.24 -- use the builtin ``int``.
    indices = [np.concatenate([p.indices, np.zeros(shape=(ni - len(p.coeff), dim), dtype=int)], axis=0) for p in self.array]
    indices = np.stack(indices, axis=2)  # shape = [NI, ND, ?]
    # Apply d/d(lambda) ``order`` times: multiply each term by its exponent,
    # then lower that exponent by one (clamped at zero).
    for axis in range(order):
        axes = [axis + 1] + [i for i in range(axis + 3) if i != axis + 1]
        coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=axes)
        axes = list(range(1, axis + 2)) + [axis + 3]
        indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=int), axis=axes)
        indices = np.maximum(indices, 0)
    c = np.reshape(c, newshape=[-1, 1] + [dim] * order)
    c = np.transpose(c, axes=list(range(2, order + 2)) + [1, 0])  # shape = [ND] * order + [1] + [?]
    coeff = np.reshape((c * coeff), newshape=(dim ** order * ni, -1))  # shape = [ND] * order + [NI] + [?]
    indices = np.reshape(indices, newshape=(dim ** order * ni, dim, -1))  # shape = [ND] * order + [NI] + [ND] + [?]
    return PolynomialArray([Polynomial(coeff[:, i], indices[:, :, i], merge=True) for i in range(coeff.shape[-1])], shape=self.shape)
def integral(self, dim, determinant):
    """Integrate each polynomial over a simplex of dimension ``dim``.

    Working correctly in a triangulation grid only, via the barycentric
    monomial formula::

                                                   \\Pi_i \\alpha_i!
        \\int_K \\Pi_i \\lambda_i^{\\alpha_i} dx = ------------------------ * determinant
                                                (dim + \\Sum_i \\alpha_i)!
    """
    # Pad every polynomial to a common number of terms for batching.
    ni = max(len(p.coeff) for p in self.array)
    nd = self.array[0].n_elements
    coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - len(p.coeff),))], axis=0) for p in self.array]
    coeff = np.stack(coeff, axis=1)  # shape = [NI, ?]
    # FIX: ``np.int`` was removed in NumPy 1.24 -- use the builtin ``int``.
    indices = [np.concatenate([p.indices, np.zeros(shape=(ni - len(p.coeff), nd), dtype=int)], axis=0) for p in self.array]
    indices = np.stack(indices, axis=2)  # shape = [NI, ND, ?]
    degree = np.max(indices)
    if degree == 0:
        numerator = np.ones_like(indices)  # shape = [NI, ND, ?]
    else:
        # Falling factorial alpha*(alpha-1)*...*1 == alpha! (1 for alpha=0).
        numerator = reduce_prod([np.maximum(indices - i, 1) for i in range(degree)])  # shape = [NI, ND, ?]
    numerator = np.prod(numerator, axis=1)  # shape = [NI, ?]
    denominator = np.sum(indices, axis=1) + dim  # shape = [NI, ?]
    denominator = reduce_prod([np.maximum(denominator - i, 1) for i in range(degree + dim)])  # shape = [NI, ?]
    return np.reshape(np.sum(coeff * numerator / denominator, axis=0), newshape=self.shape) * determinant
def unit_test():
np.set_printoptions(precision=2)
x = np.random.rand(4, 3)
const_array = np.random.rand(8, 7)
# item 6, | |
end = team_sizes[team]
child_perf_vars = perf_vars[start:end]
coeffs = flatten_weights[start:end]
yield SumFactor(team_perf_var, child_perf_vars, coeffs)
def build_team_diff_layer():
    # One SumFactor per adjacent pair of team performances:
    # diff = perf[i] - perf[i+1].
    for index, diff_var in enumerate(team_diff_vars):
        adjacent_perfs = team_perf_vars[index:index + 2]
        yield SumFactor(diff_var, adjacent_perfs, [+1, -1])
def build_trunc_layer():
    # Builds one TruncateFactor per team-difference variable; the chosen
    # v/w functions encode whether the adjacent pair drew or had a winner.
    for x, team_diff_var in enumerate(team_diff_vars):
        if callable(self.draw_probability):
            # dynamic draw probability: delegate to the user-supplied
            # callable with the two adjacent team performances
            team_perf1, team_perf2 = team_perf_vars[x:x + 2]
            args = (Rating(team_perf1), Rating(team_perf2), self)
            draw_probability = self.draw_probability(*args)
        else:
            # static draw probability
            draw_probability = self.draw_probability
        # draw margin scales with the combined size of the two teams
        size = sum(map(len, rating_groups[x:x + 2]))
        draw_margin = calc_draw_margin(draw_probability, size, self)
        if ranks[x] == ranks[x + 1]:  # is a tie?
            v_func, w_func = self.v_draw, self.w_draw
        else:
            v_func, w_func = self.v_win, self.w_win
        yield TruncateFactor(team_diff_var,
                             v_func, w_func, draw_margin)
# build layers
return (build_rating_layer, build_perf_layer, build_team_perf_layer,
build_team_diff_layer, build_trunc_layer)
def run_schedule(self, build_rating_layer, build_perf_layer,
                 build_team_perf_layer, build_team_diff_layer,
                 build_trunc_layer, min_delta=DELTA):
    """Send messages between the nodes of the factor graph until the
    result is reliable (the largest update falls below ``min_delta``).

    Returns the list of factor layers, in build order; the caller reads
    the updated ratings back out of the first (rating) layer.
    """
    if min_delta <= 0:
        raise ValueError('min_delta must be greater than 0')
    layers = []

    def build(builders):
        # Materialize each builder's factors and remember every layer.
        layers_built = [list(build()) for build in builders]
        layers.extend(layers_built)
        return layers_built
    # gray arrows: priors flow down from ratings to team performances
    layers_built = build([build_rating_layer,
                          build_perf_layer,
                          build_team_perf_layer])
    rating_layer, perf_layer, team_perf_layer = layers_built
    for f in chain(*layers_built):
        f.down()
    # arrow #1, #2, #3
    team_diff_layer, trunc_layer = build([build_team_diff_layer,
                                          build_trunc_layer])
    team_diff_len = len(team_diff_layer)
    # NOTE(review): 10 looks like a hard cap on the message-passing sweeps;
    # convergence normally exits earlier via the min_delta break below.
    for x in range(10):
        if team_diff_len == 1:
            # only two teams: a single down/up exchange per sweep
            team_diff_layer[0].down()
            delta = trunc_layer[0].up()
        else:
            # multiple teams: forward then backward pass over the chain
            delta = 0
            for x in range(team_diff_len - 1):
                team_diff_layer[x].down()
                delta = max(delta, trunc_layer[x].up())
                team_diff_layer[x].up(1)  # up to right variable
            for x in range(team_diff_len - 1, 0, -1):
                team_diff_layer[x].down()
                delta = max(delta, trunc_layer[x].up())
                team_diff_layer[x].up(0)  # up to left variable
        # repeat until the update becomes too small to matter
        if delta <= min_delta:
            break
    # up both ends
    team_diff_layer[0].up(0)
    team_diff_layer[team_diff_len - 1].up(1)
    # up the remainder of the black arrows
    for f in team_perf_layer:
        for x in range(len(f.vars) - 1):
            f.up(x)
    for f in perf_layer:
        f.up()
    return layers
def rate(self, rating_groups, ranks=None, weights=None, min_delta=DELTA):
    """Recalculates ratings by the ranking table::
        env = TrueSkill()  # uses default settings
        # create ratings
        r1 = env.create_rating(42.222)
        r2 = env.create_rating(89.999)
        # calculate new ratings
        rating_groups = [(r1,), (r2,)]
        rated_rating_groups = env.rate(rating_groups, ranks=[0, 1])
        # save new ratings
        (r1,), (r2,) = rated_rating_groups
    ``rating_groups`` is a list of rating tuples or dictionaries that
    represents each team of the match. You will get a result in the same
    structure as this argument. Rating dictionaries may be useful to
    choose a specific player's new rating::
        # load players from the database
        p1 = load_player_from_database('<NAME>')
        p2 = load_player_from_database('<NAME>')
        p3 = load_player_from_database('<NAME>')
        # calculate new ratings
        rating_groups = [{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}]
        rated_rating_groups = env.rate(rating_groups, ranks=[0, 1])
        # save new ratings
        for player in [p1, p2, p3]:
            player.rating = rated_rating_groups[player.team][player]
    :param rating_groups: a list of tuples or dictionaries containing
                          :class:`Rating` objects.
    :param ranks: a ranking table. By default, it is the same as the order
                  of the ``rating_groups``.
    :param weights: weights of each player for "partial play".
    :param min_delta: each loop checks a delta of changes and the loop
                      will stop if the delta is less than this argument.
    :returns: recalculated ratings in the same structure as
              ``rating_groups``.
    :raises: :exc:`FloatingPointError` occurs when winners have a far
             lower rating than losers. Higher floating-point precision
             could solve this error; set the backend to "mpmath".
    .. versionadded:: 0.2
    """
    rating_groups, keys = self.validate_rating_groups(rating_groups)
    weights = self.validate_weights(weights, rating_groups, keys)
    group_size = len(rating_groups)
    if ranks is None:
        ranks = range(group_size)
    elif len(ranks) != group_size:
        raise ValueError('Wrong ranks')
    # sort rating groups by rank
    by_rank = lambda x: x[1][1]
    sorting = sorted(enumerate(zip(rating_groups, ranks, weights)),
                     key=by_rank)
    sorted_rating_groups, sorted_ranks, sorted_weights = [], [], []
    for x, (g, r, w) in sorting:
        sorted_rating_groups.append(g)
        sorted_ranks.append(r)
        # make weights greater than 0.
        # FIX: append a list, not a generator expression -- a generator is
        # single-use, so any later code that iterates a team's weights more
        # than once would silently see it empty the second time.
        sorted_weights.append([max(min_delta, w_) for w_ in w])
    # build factor graph
    args = (sorted_rating_groups, sorted_ranks, sorted_weights)
    builders = self.factor_graph_builders(*args)
    args = builders + (min_delta,)
    layers = self.run_schedule(*args)
    # make result
    rating_layer, team_sizes = layers[0], _team_sizes(sorted_rating_groups)
    transformed_groups = []
    for start, end in zip([0] + team_sizes[:-1], team_sizes):
        group = []
        for f in rating_layer[start:end]:
            group.append(Rating(float(f.var.mu), float(f.var.sigma)))
        transformed_groups.append(tuple(group))
    # undo the rank sort so results line up with the caller's input order
    by_hint = lambda x: x[0]
    unsorting = sorted(zip((x for x, __ in sorting), transformed_groups),
                       key=by_hint)
    if keys is None:
        return [g for x, g in unsorting]
    # restore the structure with input dictionary keys
    return [dict(zip(keys[x], g)) for x, g in unsorting]
def quality(self, rating_groups, weights=None):
    """Calculates the match quality of the given rating groups. A result
    is the draw probability in the association::
        env = TrueSkill()
        if env.quality([team1, team2, team3]) < 0.50:
            print('This match seems to be not so fair')
    :param rating_groups: a list of tuples or dictionaries containing
                          :class:`Rating` objects.
    :param weights: weights of each players for "partial play".
    .. versionadded:: 0.2
    """
    rating_groups, keys = self.validate_rating_groups(rating_groups)
    weights = self.validate_weights(weights, rating_groups, keys)
    flatten_ratings = sum(map(tuple, rating_groups), ())
    flatten_weights = sum(map(tuple, weights), ())
    length = len(flatten_ratings)
    # a vector of all of the skill means
    mean_matrix = Matrix([[r.mu] for r in flatten_ratings])
    # a matrix whose diagonal values are the variances (sigma ** 2) of each
    # of the players.
    def variance_matrix(height, width):
        variances = (r.sigma ** 2 for r in flatten_ratings)
        for x, variance in enumerate(variances):
            yield (x, x), variance
    variance_matrix = Matrix(variance_matrix, length, length)
    # the player-team assignment and comparison matrix
    def rotated_a_matrix(set_height, set_width):
        # NOTE: this generator deliberately relies on the loop variables
        # ``r`` and ``x`` leaking out of the for-loops to report the final
        # matrix dimensions -- do not "clean up" the variable scoping.
        t = 0
        for r, (cur, _next) in enumerate(zip(rating_groups[:-1],
                                             rating_groups[1:])):
            for x in range(t, t + len(cur)):
                yield (r, x), flatten_weights[x]
                t += 1
            x += 1
            for x in range(x, x + len(_next)):
                yield (r, x), -flatten_weights[x]
        set_height(r + 1)
        set_width(x + 1)
    rotated_a_matrix = Matrix(rotated_a_matrix)
    a_matrix = rotated_a_matrix.transpose()
    # match quality further derivation
    _ata = (self.beta ** 2) * rotated_a_matrix * a_matrix
    _atsa = rotated_a_matrix * variance_matrix * a_matrix
    start = mean_matrix.transpose() * a_matrix
    middle = _ata + _atsa
    end = rotated_a_matrix * mean_matrix
    # make result
    e_arg = (-0.5 * start * middle.inverse() * end).determinant()
    s_arg = _ata.determinant() / middle.determinant()
    return math.exp(e_arg) * math.sqrt(s_arg)
def expose(self, rating):
    """Returns the value of the rating exposure. It starts from 0 and
    converges to the mean. Use this as a sort key in a leaderboard::
        leaderboard = sorted(ratings, key=env.expose, reverse=True)
    .. versionadded:: 0.4
    """
    # Offset by (mu/sigma) standard deviations so an uncertain default
    # rating exposes as exactly zero.
    multiplier = self.mu / self.sigma
    return rating.mu - multiplier * rating.sigma
def make_as_global(self):
    """Registers this environment as the global environment.
    >>> env = TrueSkill(mu=50)
    >>> Rating()
    trueskill.Rating(mu=25.000, sigma=8.333)
    >>> env.make_as_global()  #doctest: +ELLIPSIS
    trueskill.TrueSkill(mu=50.000, ...)
    >>> Rating()
    trueskill.Rating(mu=50.000, sigma=8.333)
    If you only ever need one environment, prefer :func:`setup` instead.
    """
    # Delegate to setup(), which swaps the module-level environment.
    return setup(env=self)
def __repr__(self):
    """Debug representation listing every tunable environment parameter."""
    cls = type(self)
    # Render the draw probability as a dotted callable path or a percentage.
    if callable(self.draw_probability):
        func = self.draw_probability
        draw_probability = '.'.join([func.__module__, func.__name__])
    else:
        draw_probability = '%.1f%%' % (self.draw_probability * 100)
    # Only mention the backend when one was chosen explicitly.
    if self.backend is None:
        backend = ''
    elif isinstance(self.backend, tuple):
        backend = ', backend=...'
    else:
        backend = ', backend=%r' % self.backend
    qualname = '.'.join([cls.__module__, cls.__name__])
    template = ('%s(mu=%.3f, sigma=%.3f, beta=%.3f, tau=%.3f, '
                'draw_probability=%s%s)')
    return template % (qualname, self.mu, self.sigma, self.beta,
                       self.tau, draw_probability, backend)
def rate_1vs1(rating1, rating2, drawn=False, min_delta=DELTA, env=None):
    """A shortcut to rate just 2 players in a head-to-head match::
        alice, bob = Rating(25), Rating(30)
        alice, bob = rate_1vs1(alice, bob)
        alice, bob = rate_1vs1(alice, bob, drawn=True)
    :param rating1: the winner's rating if they didn't draw.
    :param rating2: the loser's rating if they didn't draw.
    :param drawn: if the players drew, set this to ``True``. Defaults to
                  ``False``.
    :param min_delta: will be passed to :meth:`rate`.
    :param env: the :class:`TrueSkill` object. Defaults to the global
                environment.
    :returns: a tuple containing the 2 recalculated ratings.
    .. versionadded:: 0.2
    """
    if env is None:
        env = global_env()
    # A draw means both players share rank 0; otherwise player 2 ranks last.
    second_rank = 0 if drawn else 1
    rated = env.rate([(rating1,), (rating2,)], [0, second_rank],
                     min_delta=min_delta)
    (new1,), (new2,) = rated
    return new1, new2
def quality_1vs1(rating1, rating2, env=None):
    """A shortcut to calculate the match quality between just 2 players in
    a head-to-head match::
        if quality_1vs1(alice, bob) < 0.50:
            print('This match seems to be not so fair')
    :param rating1: the rating.
    :param rating2: the other rating.
    :param env: the :class:`TrueSkill` object. Defaults to the global
                environment.
    .. versionadded:: 0.2
    """
    chosen_env = global_env() if env is None else env
    return chosen_env.quality([(rating1,), (rating2,)])
def global_env():
"""Gets the :class:`TrueSkill` object | |
<reponame>eugenechantk/cs152-harnessing-ai-algorithm
import heapq
import numpy as np
import time
def flatten(board):
    """Return ``board`` as a flat list of tile values.

    Accepts either a nested list of rows (flattened row by row) or an
    already-flat sequence of ints/floats, and raises ValueError for
    anything else.
    """
    # FIX: guard the empty case -- the original indexed board[1] and raised
    # a bare IndexError for boards with fewer than two entries.
    if not board:
        raise ValueError("Class 'PuzzleNode' got values that are not a sublist of ints nor a flat list of ints.")
    # FIX: probe the FIRST element (index 0, not 1) to decide the layout.
    if isinstance(board[0], list):
        return [tile for row in board for tile in row]
    if isinstance(board[0], (int, float)):
        # Return a real list so callers can compare/index it uniformly
        # (the original could hand back a range object on Python 3).
        return list(board)
    raise ValueError("Class 'PuzzleNode' got values that are not a sublist of ints nor a flat list of ints.")
"""
Class: PuzzleNode
Purpose: Object for each puzzle board created during search
Arg: None
Class Functions
__hash__(): return a hash value for the puzzle board to id the puzzle
__str__(): return a matrix representation of the puzzle board in string format
__eq__(others): return True if another PuzzleNode object is identical
get_moves(): return PuzzleNodes object that are the possible moves
of a puzzle
list_of_list(): transform the 1d array representation of the puzzle
into a multi-d array representation
verify_input(): verify that the puzzle is n*n and contains all the
numbers expected on the board
"""
class PuzzleNode():
    """One board state in the sliding-puzzle search tree.

    Boards are stored as a flat list of tiles (``0`` is the blank) plus a
    hash id used to key the visited/step stores during A* search.
    """

    def __init__(self, n, values, cost, parent, heuristic):
        # parent of the candidate puzzle (a puzzleid hash, set by get_moves)
        self.parent = parent
        # dimension of the puzzle (board is n x n)
        self.n = n
        # FIX: the original computed flatten(values) and then immediately
        # overwrote it with the raw ``values``; keep the flattened form so
        # nested row-lists are accepted as documented.
        self.tiles = flatten(values)
        # NOTE(review): ``cost`` and ``heuristic`` are accepted but never
        # stored here -- presumably tracked by the A* driver; confirm
        # before removing them from the signature.
        # Hash of the tile tuple identifies this exact board configuration.
        self.puzzleid = hash(tuple(self.tiles))

    def __hash__(self):
        # Lazily (re)compute the id if it was ever cleared.
        if self.puzzleid is None:
            self.puzzleid = hash(tuple(self.tiles))
        return self.puzzleid

    def __str__(self):
        """Render the board as n rows of space-separated tile values."""
        strings_list = [str(x) for x in self.tiles]
        # FIX: xrange -> range (Python 3 compatibility; range works on 2 too).
        rows = [" ".join(strings_list[i:i + self.n])
                for i in range(0, self.n ** 2, self.n)]
        return "\n".join(rows)

    def __eq__(self, other):
        # Two nodes are equal when their boards match, regardless of path.
        return self.tiles == other.tiles

    def get_moves(self):
        """Yield the PuzzleNodes reachable by sliding one tile into the blank."""
        zeroPos = self.tiles.index(0)
        n = self.n

        def swap(zeroPos, move, n):
            # Return the board with the blank moved by ``move``, or None
            # when the move would fall off an edge of the grid.
            temp = list(self.tiles)
            swapPos = zeroPos + move
            # FIX: use floor division (//) for the row tests; plain ``/``
            # is float division on Python 3 and never equals 0 or n-1.
            if zeroPos % n == 0 and move == -1:
                return  # blank on left edge -- cannot move left
            elif zeroPos % n == n - 1 and move == 1:
                return  # right edge
            elif zeroPos // n == 0 and move == -n:
                return  # top row
            elif zeroPos // n == n - 1 and move == n:
                return  # bottom row
            else:
                temp[zeroPos], temp[swapPos] = temp[swapPos], temp[zeroPos]
                return PuzzleNode(self.n, temp, 0, self.puzzleid, None)

        # At most 4 candidate boards: right, left, down, up.
        # (FIX: build each candidate once instead of calling swap() twice.)
        for move in (1, -1, n, -n):
            candidate = swap(zeroPos, move, n)
            if candidate is not None:
                yield candidate

    def list_of_list(self):
        """Return the board as a list of n row-lists."""
        # FIX: xrange -> range.
        return [self.tiles[i:i + self.n] for i in range(0, self.n ** 2, self.n)]

    def verify_input(self):
        """Validate the board; raise ValueError (with a reason) when invalid.

        Returns (err, reason, tiles) with err == 0 on success.
        """
        err = 0
        reason = "Input was valid"
        initial_state = self.tiles
        n = self.n
        # Check the dimension of the puzzle
        if n < 2 or n >= 128:
            err = -1
            reason = "Puzzle size not valid"
        # Check if the puzzle has the size of n^2
        if len(initial_state) != n * n:
            err = -1
            reason = "Puzzle size is not n^2"
        sorted_list = sorted(initial_state)
        # FIX: wrap in list() so the equality check also works on Python 3,
        # where range() is no longer a list.
        verified_list = list(range(n ** 2))
        # Compare the puzzle list with all numbers from 0 to n^2-1
        if sorted_list != verified_list:
            err = -1
            reason = "Puzzle does not contain all numbers from 0 to n^2-1"
        # break the program when there is an error
        if err == -1:
            raise ValueError(reason)
        return err, reason, initial_state
"""
Function isSolvable
Purpose: Determine whether a given board is solvable based on
inversion rule
Arg:
board: (list) a list_of_list representation of the board configuration
Return:
err: (int) -2 if the board is unsolvable; 0 if the board is solvable
reason: (str) the reason for the error code
"""
def isSolvable(board):
    """Determine whether a flat sliding-puzzle board is solvable.

    Uses the classic inversion-parity rule. Returns ``(0, reason)`` when
    solvable and raises ValueError (err would be -2) when not.
    """
    inversions = 0
    n = int(len(board) ** 0.5)
    zeroPos = board.index(0)
    # Count ordered pairs that appear out of order, ignoring the blank.
    # FIX: xrange -> range for Python 3 compatibility.
    for i in range(len(board)):
        for j in range(i + 1, len(board)):
            if board[i] > board[j] and board[j] != 0:
                inversions += 1
    if n % 2 == 0:  # grid width is even
        # FIX: floor division -- ``/`` yields a float row index on Python 3,
        # so both parity branches would otherwise misfire.
        if (zeroPos // n) % 2 == 0:  # 0 tile on even row counting from bottom
            if inversions % 2 == 1:  # odd inversion count is solvable
                err = 0
                reason = "The puzzle is solvable"
            else:
                err = -2
                reason = "The puzzle's width is even, 0 tile on even row counting from bottom, inversions is even. Puzzle unsolvable"
        else:  # 0 tile on odd row counting from bottom
            if inversions % 2 == 0:  # even inversion count is solvable
                err = 0
                reason = "The puzzle is solvable"
            else:
                err = -2
                reason = "The puzzle's width is even, 0 tile on odd row counting from bottom, inversions is odd. Puzzle unsolvable"
    else:  # grid width is odd
        if inversions % 2 == 0:
            err = 0
            reason = "The puzzle is solvable"
        else:
            err = -2
            reason = "The puzzle's width is odd, and the inversions is odd. Puzzle unsolvable"
    if err == -2:
        raise ValueError(reason)
    return err, reason
"""
Function: solvePuzzle
Purpose: Using A* search with heuristic to solve a n^n puzzle
Arg:
n: (int) dimension of the puzzle
state: (list) the initial puzzle board
heuristic: (function) the heuristic function used in the A* search
prnt: (boolean) whether or not to print the full solution
Return:
steps: (int) number of search steps before solving the puzzle
frontierSize: (int) largest frontier size during search
err: (int) 0 means no error; 1 means the puzzle is invalid
run_time: (time) the time needed to solve the puzzle
"""
def solvePuzzle (n, state, heuristic, prnt=False):
start_time = time.time()
run_time = 0.0
queue = [] #priority queue to determine the least costly node to search
total_cost = {} #total cost of the shortest path
heuristic_cost = {} #cache of previous heuristic cost of boards
visited = {} #the puzzle boards expanded and searched
steps_to_sol = [] #detailed steps towards solution
frontierSize = 0 #largest frontier size of the search tree
steps = -1 #number of steps needed to solve
tiles = flatten(state) #1d representation of the puzzle
#Defining current state and goal state
start = PuzzleNode(n, tiles, 0, None, heuristic)
goal = PuzzleNode(n, range(n**2),100,None,heuristic)
#verify whether the intial puzzle board is valid
err, reason, initial_state = start.verify_input()
#using isSolvable() to check whether the initial puzzle is solvable
if err == 0:
err, reason = isSolvable(start.tiles)
unsolved = True
#Initializing heap and total cost
heapq.heappush(queue,(0,start))
total_cost[start] = 0
if prnt:
if heuristic == manhattanDistance:
print "Solving using Manhattan Distance...\n"
elif heuristic == misplacedTiles:
print "Solving using Misplaced Tiles...\n"
print "Start solving puzzle from:\n"
print "{}\n".format(start.__str__())
#traverse through all the candidates until there is none
while unsolved:
steps += 1
#Select the least costly node to expand using priority queue
cost, current = heapq.heappop(queue)
current_cost = total_cost[current]
#Put the searched puzzle board into the visited store
visited[current] = current
#When the current board matches with the goal board
if current.tiles == goal.tiles:
unsolved = False
if prnt:
print "Puzzle Solved!\n"
print "Initial Puzzle Board:\n"
print "{}\n".format(start.__str__())
print "Final Puzzle Board:\n"
print "{}\n".format(current.__str__())
run_time = time.time()-start_time
break
if prnt:
print "Currently inspecting...\n"
print "{}\n".format(current.__str__())
#Evaluate every candidate move
candidates = current.get_moves()
for move in candidates:
"""For debugging
| |
method_map, resource)
def add_sink(self, sink, prefix=r'/'):
    """Registers a sink method for the API.
    When no route matches a request but the requested path matches a
    sink prefix, Falcon passes control to the associated sink regardless
    of the HTTP method. Sinks let you drain and dynamically handle a
    large number of routes where static resources/responders would be
    impractical (e.g. a smart proxy forwarding to backend services).
    Args:
        sink (callable): A callable taking the form ``func(req, resp)``.
        prefix (str): A regex string, typically starting with '/', which
            triggers the sink when it matches the path portion of the
            request's URI. Strings and precompiled regex objects are both
            accepted; matching starts at the beginning of the URI path.
    Note:
        Named groups are converted to kwargs and passed to the sink.
    Warning:
        If the prefix overlaps a registered route template, the route
        takes precedence and masks the sink (see also `add_route`).
    """
    # Accept precompiled patterns (anything exposing ``match``) as-is.
    pattern = prefix if hasattr(prefix, 'match') else re.compile(prefix)
    # Prepend so the most recently added sink wins on duplicate prefixes.
    self._sinks.insert(0, (pattern, sink))
def add_error_handler(self, exception, handler=None):
    """Registers a handler for a given exception error type.
    Args:
        exception (type): Whenever an error that is an instance of this
            exception class occurs while handling a request, the
            associated handler will be called.
        handler (callable): A function or callable object taking the form
            ``func(ex, req, resp, params)``.
            When omitted, defaults to ``exception.handle``, where
            ``handle`` is a static method (decorated with @staticmethod)
            accepting the same params. For example::
                class CustomException(CustomBaseException):
                    @staticmethod
                    def handle(ex, req, resp, params):
                        # TODO: Log the error
                        # Convert to an instance of falcon.HTTPError
                        raise falcon.HTTPError(falcon.HTTP_792)
    Note:
        A handler can either raise an instance of ``HTTPError`` or modify
        `resp` manually in order to communicate information about the
        issue to the client.
    """
    if handler is None:
        # Fall back to a ``handle`` static method on the exception class.
        _missing = object()
        handler = getattr(exception, 'handle', _missing)
        if handler is _missing:
            raise AttributeError('handler must either be specified '
                                 'explicitly or defined as a static'
                                 'method named "handle" that is a '
                                 'member of the given exception class.')
    # Prepend so a duplicate registration of the same exception wins.
    self._error_handlers.insert(0, (exception, handler))
def set_error_serializer(self, serializer):
    """Override the default serializer for instances of HTTPError.
    When a responder raises an HTTPError, Falcon converts it to an HTTP
    response automatically. The default serializer supports JSON and
    XML; supply a custom serializer here to support other media types.
    ``falcon.HTTPError`` exposes helpers such as `to_json()` and
    `to_dict()` that custom serializers may use. For example::
        def my_serializer(req, exception):
            representation = None
            preferred = req.client_prefers(('application/x-yaml',
                                            'application/json'))
            if preferred is not None:
                if preferred == 'application/json':
                    representation = exception.to_json()
                else:
                    representation = yaml.dump(exception.to_dict(),
                                               encoding=None)
            return (preferred, representation)
    Note:
        If a custom media type is used and the type includes a "+json"
        or "+xml" suffix, the default serializer will convert the error
        to JSON or XML, respectively. Use a custom error serializer to
        override that behavior when it is not desirable.
    Args:
        serializer (callable): A function taking the form
            ``func(req, exception)``, where `req` is the request object
            passed to the responder and `exception` is an instance of
            ``falcon.HTTPError``. It must return a ``tuple`` of the form
            (*media_type*, *representation*), or (``None``, ``None``)
            when the client supports none of the available media types.
    """
    self._serialize_error = serializer
# ------------------------------------------------------------------------
# Helpers that require self
# ------------------------------------------------------------------------
def _get_responder(self, req):
    """Search routes, then sinks, for a responder matching ``req``.

    Returns:
        A 3-member tuple ``(responder, params, resource)``: the matched
        callable (``falcon.responders.bad_request`` when the route exists
        but the HTTP method is unsupported, ``path_not_found`` when
        nothing matches), a dict of parsed URI fields or sink named
        groups, and the matched resource instance (or None).
    """
    path = req.path
    resource, method_map, params = self._router.find(path)
    if resource is not None:
        # Route matched: pick the method's responder or fall back to 400.
        try:
            responder = method_map[req.method]
        except KeyError:
            responder = falcon.responders.bad_request
    else:
        # No route: try each sink prefix in order, else 404.
        params = {}
        responder = falcon.responders.path_not_found
        for pattern, sink in self._sinks:
            match = pattern.match(path)
            if match:
                params = match.groupdict()
                responder = sink
                break
    return (responder, params, resource)
def _compose_status_response(self, req, resp, http_status):
    """Copy an HTTPStatus instance's status, headers, and body onto resp."""
    # PERF: intentionally mirrors _compose_error_response rather than
    # sharing a helper -- DRY isn't worth the extra CPU cycles here.
    resp.status = http_status.status
    headers = http_status.headers
    if headers is not None:
        resp.set_headers(headers)
    # A None body is acceptable: it simply means "no body".
    resp.body = http_status.body
def _compose_error_response(self, req, resp, error):
    """Populate ``resp`` from an HTTPError instance."""
    resp.status = error.status
    if error.headers is not None:
        resp.set_headers(error.headers)
    if not error.has_representation:
        return
    media_type, body = self._serialize_error(req, error)
    if body is not None:
        resp.body = body
        # Assign Content-Type AFTER the headers above so it overrides any
        # value the app may have set by mistake.
        resp.content_type = media_type
def _call_req_mw(self, stack, req, resp):
    """Run each middleware component's process_request method."""
    for component in self._middleware:
        process_request = component[0]
        if process_request is not None:
            process_request(req, resp)
        # Record the executed component so the caller can unwind
        # process_response in reverse order later.
        stack.append(component)
def _call_rsrc_mw(self, stack, req, resp, resource):
    """Run each middleware component's process_resource method."""
    for _, process_resource, _ in self._middleware:
        if process_resource is not None:
            process_resource(req, resp, resource)
def _call_resp_mw(self, stack, req, resp, resource):
    """Unwind the middleware stack, running process_response hooks."""
    # Pop in LIFO order so responses unwind the request order exactly.
    while stack:
        component = stack.pop()
        process_response = component[2]
        if process_response is not None:
            process_response(req, resp, resource)
def _call_after_hooks(self, req, resp, resource):
    """Executes each of the global "after" hooks, in turn."""
    if not self._after:
        return
    for hook in self._after:
        try:
            hook(req, resp, resource)
        except TypeError:
            # NOTE(kgriffs): Catching the TypeError is a heuristic to
            # detect old hooks that do not accept the "resource" param.
            # NOTE(review): this can also swallow a TypeError raised
            # *inside* a modern hook and retry it with two args -- a
            # known trade-off of the heuristic; do not "simplify" it.
            hook(req, resp)
# PERF(kgriffs): Moved from api_helpers since it is slightly faster
# to call using self, and this function is called for most
# requests.
def _set_content_length(self, resp):
    """Set Content-Length from a fully-buffered body or a known stream len.

    Pre:
        Either resp.body or resp.stream is set.
    Post:
        resp carries a "Content-Length" header unless a stream with no
        stream_len was given (length cannot be derived reliably).
    Args:
        resp: The response object on which to set the content length.
    Returns:
        The length that was set, or -1 when the header was skipped.
    """
    if resp.body_encoded is not None:
        # Body is an already-encoded byte string; measure it directly.
        content_length = len(resp.body_encoded)
    elif resp.data is not None:
        content_length = len(resp.data)
    elif resp.stream is not None and resp.stream_len is not None:
        # Total stream length is known in advance.
        content_length = resp.stream_len
    elif resp.stream is not None:
        # Dynamically-generated body of unknown length: skip the header.
        return -1
    else:
        content_length = 0
    resp.set_header('Content-Length', str(content_length))
    return content_length
# PERF(kgriffs): Moved from api_helpers since it is slightly faster
# to call using self, and this function is called for most
# requests.
def | |
<reponame>pulumi/pulumi-alicloud
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['RegistryEnterpriseSyncRuleArgs', 'RegistryEnterpriseSyncRule']
# NOTE(review): generated by the Pulumi Terraform Bridge (tfgen); hand edits
# here are normally overwritten on regeneration.  @pulumi.input_type wires the
# getter/setter pairs below into the engine's input-property machinery, so the
# property names and pulumi.get/set keys must stay exactly as generated.
@pulumi.input_type
class RegistryEnterpriseSyncRuleArgs:
    def __init__(__self__, *,
                 instance_id: pulumi.Input[str],
                 namespace_name: pulumi.Input[str],
                 tag_filter: pulumi.Input[str],
                 target_instance_id: pulumi.Input[str],
                 target_namespace_name: pulumi.Input[str],
                 target_region_id: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None,
                 repo_name: Optional[pulumi.Input[str]] = None,
                 target_repo_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a RegistryEnterpriseSyncRule resource.
        :param pulumi.Input[str] instance_id: ID of Container Registry Enterprise Edition source instance.
        :param pulumi.Input[str] namespace_name: Name of Container Registry Enterprise Edition source namespace. It can contain 2 to 30 characters.
        :param pulumi.Input[str] tag_filter: The regular expression used to filter image tags for synchronization in the source repository.
        :param pulumi.Input[str] target_instance_id: ID of Container Registry Enterprise Edition target instance to be synchronized.
        :param pulumi.Input[str] target_namespace_name: Name of Container Registry Enterprise Edition target namespace to be synchronized. It can contain 2 to 30 characters.
        :param pulumi.Input[str] target_region_id: The target region to be synchronized.
        :param pulumi.Input[str] name: Name of Container Registry Enterprise Edition sync rule.
        :param pulumi.Input[str] repo_name: Name of the source repository which should be set together with `target_repo_name`, if empty means that the synchronization scope is the entire namespace level.
        :param pulumi.Input[str] target_repo_name: Name of the target repository.
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "namespace_name", namespace_name)
        pulumi.set(__self__, "tag_filter", tag_filter)
        pulumi.set(__self__, "target_instance_id", target_instance_id)
        pulumi.set(__self__, "target_namespace_name", target_namespace_name)
        pulumi.set(__self__, "target_region_id", target_region_id)
        # Optional arguments are only recorded when explicitly supplied, so
        # unset inputs never appear in the serialized resource arguments.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if repo_name is not None:
            pulumi.set(__self__, "repo_name", repo_name)
        if target_repo_name is not None:
            pulumi.set(__self__, "target_repo_name", target_repo_name)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        ID of Container Registry Enterprise Edition source instance.
        """
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter(name="namespaceName")
    def namespace_name(self) -> pulumi.Input[str]:
        """
        Name of Container Registry Enterprise Edition source namespace. It can contain 2 to 30 characters.
        """
        return pulumi.get(self, "namespace_name")

    @namespace_name.setter
    def namespace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace_name", value)

    @property
    @pulumi.getter(name="tagFilter")
    def tag_filter(self) -> pulumi.Input[str]:
        """
        The regular expression used to filter image tags for synchronization in the source repository.
        """
        return pulumi.get(self, "tag_filter")

    @tag_filter.setter
    def tag_filter(self, value: pulumi.Input[str]):
        pulumi.set(self, "tag_filter", value)

    @property
    @pulumi.getter(name="targetInstanceId")
    def target_instance_id(self) -> pulumi.Input[str]:
        """
        ID of Container Registry Enterprise Edition target instance to be synchronized.
        """
        return pulumi.get(self, "target_instance_id")

    @target_instance_id.setter
    def target_instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_instance_id", value)

    @property
    @pulumi.getter(name="targetNamespaceName")
    def target_namespace_name(self) -> pulumi.Input[str]:
        """
        Name of Container Registry Enterprise Edition target namespace to be synchronized. It can contain 2 to 30 characters.
        """
        return pulumi.get(self, "target_namespace_name")

    @target_namespace_name.setter
    def target_namespace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_namespace_name", value)

    @property
    @pulumi.getter(name="targetRegionId")
    def target_region_id(self) -> pulumi.Input[str]:
        """
        The target region to be synchronized.
        """
        return pulumi.get(self, "target_region_id")

    @target_region_id.setter
    def target_region_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_region_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Container Registry Enterprise Edition sync rule.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="repoName")
    def repo_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the source repository which should be set together with `target_repo_name`, if empty means that the synchronization scope is the entire namespace level.
        """
        return pulumi.get(self, "repo_name")

    @repo_name.setter
    def repo_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_name", value)

    @property
    @pulumi.getter(name="targetRepoName")
    def target_repo_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the target repository.
        """
        return pulumi.get(self, "target_repo_name")

    @target_repo_name.setter
    def target_repo_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_repo_name", value)
# NOTE(review): generated by the Pulumi Terraform Bridge (tfgen); hand edits
# here are normally overwritten on regeneration.  Every field is optional
# because any subset of state may be known when looking up / importing an
# existing RegistryEnterpriseSyncRule resource.
@pulumi.input_type
class _RegistryEnterpriseSyncRuleState:
    def __init__(__self__, *,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace_name: Optional[pulumi.Input[str]] = None,
                 repo_name: Optional[pulumi.Input[str]] = None,
                 rule_id: Optional[pulumi.Input[str]] = None,
                 sync_direction: Optional[pulumi.Input[str]] = None,
                 sync_scope: Optional[pulumi.Input[str]] = None,
                 tag_filter: Optional[pulumi.Input[str]] = None,
                 target_instance_id: Optional[pulumi.Input[str]] = None,
                 target_namespace_name: Optional[pulumi.Input[str]] = None,
                 target_region_id: Optional[pulumi.Input[str]] = None,
                 target_repo_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering RegistryEnterpriseSyncRule resources.
        :param pulumi.Input[str] instance_id: ID of Container Registry Enterprise Edition source instance.
        :param pulumi.Input[str] name: Name of Container Registry Enterprise Edition sync rule.
        :param pulumi.Input[str] namespace_name: Name of Container Registry Enterprise Edition source namespace. It can contain 2 to 30 characters.
        :param pulumi.Input[str] repo_name: Name of the source repository which should be set together with `target_repo_name`, if empty means that the synchronization scope is the entire namespace level.
        :param pulumi.Input[str] rule_id: The uuid of Container Registry Enterprise Edition sync rule.
        :param pulumi.Input[str] sync_direction: `FROM` or `TO`, the direction of synchronization. `FROM` means source instance, `TO` means target instance.
        :param pulumi.Input[str] sync_scope: `REPO` or `NAMESPACE`,the scope that the synchronization rule applies.
        :param pulumi.Input[str] tag_filter: The regular expression used to filter image tags for synchronization in the source repository.
        :param pulumi.Input[str] target_instance_id: ID of Container Registry Enterprise Edition target instance to be synchronized.
        :param pulumi.Input[str] target_namespace_name: Name of Container Registry Enterprise Edition target namespace to be synchronized. It can contain 2 to 30 characters.
        :param pulumi.Input[str] target_region_id: The target region to be synchronized.
        :param pulumi.Input[str] target_repo_name: Name of the target repository.
        """
        # Only record the state fields that are actually known.
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace_name is not None:
            pulumi.set(__self__, "namespace_name", namespace_name)
        if repo_name is not None:
            pulumi.set(__self__, "repo_name", repo_name)
        if rule_id is not None:
            pulumi.set(__self__, "rule_id", rule_id)
        if sync_direction is not None:
            pulumi.set(__self__, "sync_direction", sync_direction)
        if sync_scope is not None:
            pulumi.set(__self__, "sync_scope", sync_scope)
        if tag_filter is not None:
            pulumi.set(__self__, "tag_filter", tag_filter)
        if target_instance_id is not None:
            pulumi.set(__self__, "target_instance_id", target_instance_id)
        if target_namespace_name is not None:
            pulumi.set(__self__, "target_namespace_name", target_namespace_name)
        if target_region_id is not None:
            pulumi.set(__self__, "target_region_id", target_region_id)
        if target_repo_name is not None:
            pulumi.set(__self__, "target_repo_name", target_repo_name)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of Container Registry Enterprise Edition source instance.
        """
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Container Registry Enterprise Edition sync rule.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="namespaceName")
    def namespace_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Container Registry Enterprise Edition source namespace. It can contain 2 to 30 characters.
        """
        return pulumi.get(self, "namespace_name")

    @namespace_name.setter
    def namespace_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace_name", value)

    @property
    @pulumi.getter(name="repoName")
    def repo_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the source repository which should be set together with `target_repo_name`, if empty means that the synchronization scope is the entire namespace level.
        """
        return pulumi.get(self, "repo_name")

    @repo_name.setter
    def repo_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_name", value)

    @property
    @pulumi.getter(name="ruleId")
    def rule_id(self) -> Optional[pulumi.Input[str]]:
        """
        The uuid of Container Registry Enterprise Edition sync rule.
        """
        return pulumi.get(self, "rule_id")

    @rule_id.setter
    def rule_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rule_id", value)

    @property
    @pulumi.getter(name="syncDirection")
    def sync_direction(self) -> Optional[pulumi.Input[str]]:
        """
        `FROM` or `TO`, the direction of synchronization. `FROM` means source instance, `TO` means target instance.
        """
        return pulumi.get(self, "sync_direction")

    @sync_direction.setter
    def sync_direction(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sync_direction", value)

    @property
    @pulumi.getter(name="syncScope")
    def sync_scope(self) -> Optional[pulumi.Input[str]]:
        """
        `REPO` or `NAMESPACE`,the scope that the synchronization rule applies.
        """
        return pulumi.get(self, "sync_scope")

    @sync_scope.setter
    def sync_scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sync_scope", value)

    @property
    @pulumi.getter(name="tagFilter")
    def tag_filter(self) -> Optional[pulumi.Input[str]]:
        """
        The regular expression used to filter image tags for synchronization in the source repository.
        """
        return pulumi.get(self, "tag_filter")

    @tag_filter.setter
    def tag_filter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag_filter", value)

    @property
    @pulumi.getter(name="targetInstanceId")
    def target_instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of Container Registry Enterprise Edition target instance to be synchronized.
        """
        return pulumi.get(self, "target_instance_id")

    @target_instance_id.setter
    def target_instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_instance_id", value)

    @property
    @pulumi.getter(name="targetNamespaceName")
    def target_namespace_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Container Registry Enterprise Edition target namespace to be synchronized. It can contain 2 to 30 characters.
        """
        return pulumi.get(self, "target_namespace_name")

    @target_namespace_name.setter
    def target_namespace_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_namespace_name", value)

    @property
    @pulumi.getter(name="targetRegionId")
    def target_region_id(self) -> Optional[pulumi.Input[str]]:
        """
        The target region to be synchronized.
        """
        return pulumi.get(self, "target_region_id")

    @target_region_id.setter
    def target_region_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_region_id", value)

    @property
    @pulumi.getter(name="targetRepoName")
    def target_repo_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the target repository.
        """
        return pulumi.get(self, "target_repo_name")

    @target_repo_name.setter
    def target_repo_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_repo_name", value)
class RegistryEnterpriseSyncRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
repo_name: Optional[pulumi.Input[str]] = None,
| |
of the epoch, reset all metrics.
"""
super().reset()
if self.stream:
self.data.reset()
self.epochDone = False
def share(self):
    """
    Share the data.

    Extends the superclass shared-state dict with this teacher's shared
    data handle, when one has been loaded.
    """
    shared = super().share()
    if hasattr(self, 'data'):
        shared.update(data=self.data.share())
    return shared
def label_candidates(self):
    """
    Provide consistent label candidates for all examples.

    The base implementation supplies no fixed candidate set; subclasses
    (see ``FbDialogueTeacher``) may override this to return one.
    """
    # TODO DEPRECATIONDAY: FbDialogueTeacher is being deprecated, should we
    # remove this?
    # TODO: mark as optionally abstract?
    # Deliberately a no-op at this level of the hierarchy.
    return None
def num_episodes(self) -> int:
    """
    Return the number of episodes in the data.

    Prefers the loaded ``data`` object's count; falls back to the
    superclass implementation when no data object is available.
    """
    try:
        count = self.data.num_episodes()
    except AttributeError:
        count = super().num_episodes()
    return count
def num_examples(self) -> int:
    """
    Return the number of examples in the data.

    The first call computes the count (preferring the loaded ``data``
    object, falling back to the superclass) and memoizes it on the
    instance; subsequent calls return the cached value.
    """
    if not hasattr(self, '_num_examples_cache'):
        try:
            count: int = self.data.num_examples()
        except AttributeError:
            count = super().num_examples()
        self._num_examples_cache = count
    return self._num_examples_cache
def get(self, episode_idx, entry_idx=0):
    """
    Get a specific example.

    Returns only the example message; the end-of-data flag that the
    underlying data object's ``get`` also yields is discarded.
    """
    entry = self.data.get(episode_idx, entry_idx)
    return entry[0]
def next_example(self):
    """
    Get the next example.

    In streaming mode, one full episode is buffered at a time so that
    mutators (which operate on whole episodes) can be applied before
    individual messages are handed out.  In non-streaming mode this
    defers entirely to the superclass.

    :return: an ``(action, epoch_done)`` tuple.
    """
    if self.stream:
        # unfortunately we need to also do the mutator buffering here.
        # it's difficult to structure it so it's not
        if hasattr(self, 'episode_buffer') and self.episode_buffer:
            # Serve the next message from the already-mutated episode.
            action = self.episode_buffer.pop(0)
            # NOTE(review): relies on self._saw_epoch_done having been set
            # when the episode was buffered below -- confirm nothing reads
            # it before the first episode is buffered.
            epoch_done = (not self.episode_buffer) and self._saw_epoch_done
            return action, epoch_done
        # Buffer one complete episode from the underlying stream.
        episode_buffer = []
        while True:
            action, epoch_done = self.data.get()
            episode_buffer.append(action)
            if action['episode_done']:
                # Remember whether the stream hit the end of the epoch at
                # the episode boundary; consulted when draining the buffer.
                self._saw_epoch_done = epoch_done
                break
        # perform any mutations there are
        if self.mutators:
            # Copy messages first so mutators never modify shared data.
            episode_buffer = [m.copy() for m in episode_buffer]
            for mutator in self.mutators:
                episode_buffer = mutator(episode_buffer)
        # make sure mutations are fully realized (not generators)
        self.episode_buffer = list(episode_buffer)
        # The recursive call has dual purpose:
        # - if we get back an empty episode after mutating, skip it gracefully
        # - pull the first item the episode w/ epoch_done logic, but DRY
        return self.next_example()
    else:
        action, epoch_done = super().next_example()
        return action, epoch_done
class DialogData(object):
    """
    Provides a data structure for accessing textual dialog data.

    This can be used whenever the dialog data is a fixed log of chats
    (i.e not a simulator setting). The logs can include dialog text and possibly
    supervised labels, candidate labels and rewards.

    All these are stored in this internal data format which is used by the
    ``DialogTeacher`` class.

    :param opt:
        options to initialize the class
    :param data_loader:
        an iterable with each call returning a tuple in the form
        ``((x, y, r, c, i), new_episode?)`` where the ``x`` and ``new_episode``
        fields are mandatory and other fields may be omitted or ``None``.
    :param cands:
        can be set to provide a list of candidate labels for every example in
        this dataset, which the agent can choose from (the correct answer
        should be in this set).
    :param random:
        tells the data class whether or not to visit episodes sequentially or
        randomly when returning examples to the caller.

    The contents of the ``((x, y, r, c, i), new_episode?)`` tuples returned by
    the data loader is the following:

    - ``x`` (str) is a query and possibly context
    - ``y`` (iter) is an iterable of label(s) for that query
    - ``r`` (str) is the str reward for getting that query correct
    - ``c`` (iter) is an iterable of label candidates that the student can choose from
    - ``i`` (str) is a str path to an image on disk, which will be loaded by the
      data class at request-time. should always point to the raw image file.
    - ``new_episode?`` (bool) is a boolean value specifying whether that example
      is the start of a new episode. If you don't use episodes set this
      to ``True`` every time.
    """

    def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
        # in case we need to shard the dataset
        self.rank = get_rank()
        self.num_workers = num_workers()
        # Sharding is only applied for evaluation datatypes under distributed
        # execution; see _load below.
        self.is_distributed_and_is_eval = is_distributed() and any(
            x in opt['datatype'] for x in ('valid', 'test', 'train:evalmode')
        )
        # self.data is a list of episodes
        # each episode is a tuple of entries
        # each entry is a tuple of values for the action/observation table
        if shared:
            # Re-use the already-loaded data from the sharing agent.
            self.image_loader = shared.get('image_loader', None)
            self.data = shared.get('data', [])
            self.cands = shared.get('cands', None)
        else:
            self.image_loader = ImageLoader(opt)
            self.data = []
            if 'datafile' not in opt:
                raise KeyError(
                    ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
                )
            self._load(data_loader, opt['datafile'])
            self.cands = None if cands is None else set(c for c in cands)
        # Bookkeeping for labels temporarily injected into self.cands by
        # build_table(); see the add/remove cycle there.
        self.addedCands = []
        self.copied_cands = False

    def share(self):
        """
        Share the data.
        """
        # Episodes, candidates, and the image loader are shared by reference.
        shared = {
            'data': self.data,
            'cands': self.cands,
            'image_loader': self.image_loader,
        }
        return shared

    def _read_episode(self, data_loader):
        """
        Read one episode at a time from the provided iterable over entries.

        Yields each accumulated episode (a list of entries) whenever a new
        episode starts, plus the final episode at exhaustion.

        :param data_loader:
            an iterable which returns tuples in the format described in the
            class docstring.
        """
        episode = []
        for entry, new in data_loader:
            if new and len(episode) > 0:
                yield episode
                episode = []
            episode.append(entry)

        if len(episode) > 0:
            yield episode

    def _load(self, data_loader, datafile):
        """
        Load up data from an iterable over tuples described in the class docs.

        Under distributed evaluation, episodes are round-robin sharded so each
        worker keeps only episodes where ``i % num_workers == rank``.

        :param iter data_loader:
            an iterator which returns tuples in the format described in the
            class docstring.
        :param str datafile:
        """
        for i, episode in enumerate(self._read_episode(data_loader(datafile))):
            if not self.is_distributed_and_is_eval or i % self.num_workers == self.rank:
                self.data.append(episode)

    def num_episodes(self):
        """
        Return number of episodes in the dataset.
        """
        return len(self.data)

    def num_examples(self):
        """
        Return total number of entries available.

        Each episode has at least one entry, but might have many more.
        """
        # Computed once and memoized; the data is immutable after _load.
        if hasattr(self, '_num_examples_cache'):
            return self._num_examples_cache
        self._num_examples_cache = sum(len(episode) for episode in self.data)
        return self._num_examples_cache

    def get(self, episode_idx, entry_idx=0):
        """
        Get the specified episode and the specified entry in that episode.

        Returns a ``(message, end_of_data)`` tuple; a padding example is
        returned for an out-of-range episode index (possible under sharding).

        :param episode_idx:
            which episode to return examples from
        :param entry_idx:
            which example to return from the episode. Many datasets have only
            single-entry episodes, so this defaults to zero.
        """
        if episode_idx >= len(self.data):
            return Message.padding_example(), True

        next_episode_idx_for_rank = episode_idx + 1
        # first look up data
        episode = self.data[episode_idx]
        entry = episode[entry_idx]
        episode_done = entry_idx == len(episode) - 1

        end_of_data = episode_done and next_episode_idx_for_rank >= len(self.data)

        # now pack it in a action-observation dictionary
        table = self.build_table(entry)

        # last entry in this episode
        table['episode_done'] = episode_done
        return table, end_of_data

    def build_table(self, entry):
        """
        Packs an entry into an action-observation dictionary.

        Accepts either a dict/Message (used nearly as-is) or a positional
        tuple/list in the ``(x, y, r, c, i)`` form from the class docstring.

        :param entry: a tuple in the form described in the class docstring.
        """
        if isinstance(entry, (dict, Message)):
            # user is already provided things
            if 'eval_labels' in entry or 'eval_label' in entry:
                raise KeyError(
                    'Labels are converted to eval_labels automatically. Please do not '
                    'set them in setup_data.'
                )
            if 'episode_done' in entry:
                raise KeyError(
                    "episode_done is set automatically for you. Please don't set it "
                    "in setup_data."
                )
            if 'label' in entry:
                # for convenience, rename to the labels convention automatically
                label = entry.pop('label')
                assert isinstance(label, str)
                entry['labels'] = (label,)
            if 'labels' in entry and isinstance(entry['labels'], str):
                entry['labels'] = (entry['labels'],)
            table = entry.copy()
        elif isinstance(entry, (Tuple, List)):
            table = {}
            if entry[0] is not None:
                table['text'] = entry[0]
            if len(entry) > 1 and entry[1] is not None:
                l = entry[1]
                if isinstance(l, str):
                    l = (l,)
                table['labels'] = l
            if len(entry) > 2 and entry[2] is not None:
                table['reward'] = entry[2]
            if len(entry) > 3 and entry[3] is not None:
                table['label_candidates'] = entry[3]
            if len(entry) > 4 and entry[4] is not None:
                img = self.image_loader.load(entry[4])
                if img is not None:
                    table['image'] = img
        else:
            raise TypeError(
                f"items out of setup_data should be dict, Message, list, or tuple. "
                f"Got {type(entry)})"
            )

        if table.get('labels', None) is not None and self.cands is not None:
            # Keep the fixed candidate set valid for this example: drop the
            # labels injected for the previous example, then (copy-on-write)
            # add this example's labels so the true label is always present.
            if self.addedCands:
                # remove elements in addedCands
                self.cands.difference_update(self.addedCands)
                self.addedCands.clear()
            for label in table['labels']:
                if label not in self.cands:
                    # add labels, queue them for removal next time
                    if not self.copied_cands:
                        self.cands = self.cands.copy()
                        self.copied_cands = True
                    self.cands.add(label)
                    self.addedCands.append(label)
            table['label_candidates'] = self.cands

        if 'labels' in table and 'label_candidates' in table:
            if table['labels'][0] not in table['label_candidates']:
                raise RuntimeError('true label missing from candidate labels')

        # go ahead and make it a message
        if isinstance(table, dict):
            table = Message(table)

        return table
class StreamDialogData(DialogData):
"""
Provides a data structure for streaming textual dialog data.
This can | |
m.x35 - m.x36 + 2.7848926778388*m.b559 <= 3.88350496650691)
# Machine-generated Pyomo constraint fragment (model `m`).
# NOTE(review): each group of 11 constraints links consecutive x-variables
# (x_{k} - x_{k+1}) with the same coefficient applied to binaries b549..b559
# and a matching right-hand side; looks like a big-M style formulation --
# confirm against the generating model before hand-editing any number.
# x37..x48 block, coefficient 2.45503721618572:
m.c1448 = Constraint(expr= m.x37 - m.x38 + 2.45503721618572*m.b549 <= 3.55364950485383)
m.c1449 = Constraint(expr= m.x38 - m.x39 + 2.45503721618572*m.b550 <= 3.55364950485383)
m.c1450 = Constraint(expr= m.x39 - m.x40 + 2.45503721618572*m.b551 <= 3.55364950485383)
m.c1451 = Constraint(expr= m.x40 - m.x41 + 2.45503721618572*m.b552 <= 3.55364950485383)
m.c1452 = Constraint(expr= m.x41 - m.x42 + 2.45503721618572*m.b553 <= 3.55364950485383)
m.c1453 = Constraint(expr= m.x42 - m.x43 + 2.45503721618572*m.b554 <= 3.55364950485383)
m.c1454 = Constraint(expr= m.x43 - m.x44 + 2.45503721618572*m.b555 <= 3.55364950485383)
m.c1455 = Constraint(expr= m.x44 - m.x45 + 2.45503721618572*m.b556 <= 3.55364950485383)
m.c1456 = Constraint(expr= m.x45 - m.x46 + 2.45503721618572*m.b557 <= 3.55364950485383)
m.c1457 = Constraint(expr= m.x46 - m.x47 + 2.45503721618572*m.b558 <= 3.55364950485383)
m.c1458 = Constraint(expr= m.x47 - m.x48 + 2.45503721618572*m.b559 <= 3.55364950485383)
# x49..x60 block, coefficient 2.38472523684503:
m.c1459 = Constraint(expr= m.x49 - m.x50 + 2.38472523684503*m.b549 <= 3.48333752551314)
m.c1460 = Constraint(expr= m.x50 - m.x51 + 2.38472523684503*m.b550 <= 3.48333752551314)
m.c1461 = Constraint(expr= m.x51 - m.x52 + 2.38472523684503*m.b551 <= 3.48333752551314)
m.c1462 = Constraint(expr= m.x52 - m.x53 + 2.38472523684503*m.b552 <= 3.48333752551314)
m.c1463 = Constraint(expr= m.x53 - m.x54 + 2.38472523684503*m.b553 <= 3.48333752551314)
m.c1464 = Constraint(expr= m.x54 - m.x55 + 2.38472523684503*m.b554 <= 3.48333752551314)
m.c1465 = Constraint(expr= m.x55 - m.x56 + 2.38472523684503*m.b555 <= 3.48333752551314)
m.c1466 = Constraint(expr= m.x56 - m.x57 + 2.38472523684503*m.b556 <= 3.48333752551314)
m.c1467 = Constraint(expr= m.x57 - m.x58 + 2.38472523684503*m.b557 <= 3.48333752551314)
m.c1468 = Constraint(expr= m.x58 - m.x59 + 2.38472523684503*m.b558 <= 3.48333752551314)
m.c1469 = Constraint(expr= m.x59 - m.x60 + 2.38472523684503*m.b559 <= 3.48333752551314)
# x61..x72 block, coefficient 2.44046880035743:
m.c1470 = Constraint(expr= m.x61 - m.x62 + 2.44046880035743*m.b549 <= 3.53908108902554)
m.c1471 = Constraint(expr= m.x62 - m.x63 + 2.44046880035743*m.b550 <= 3.53908108902554)
m.c1472 = Constraint(expr= m.x63 - m.x64 + 2.44046880035743*m.b551 <= 3.53908108902554)
m.c1473 = Constraint(expr= m.x64 - m.x65 + 2.44046880035743*m.b552 <= 3.53908108902554)
m.c1474 = Constraint(expr= m.x65 - m.x66 + 2.44046880035743*m.b553 <= 3.53908108902554)
m.c1475 = Constraint(expr= m.x66 - m.x67 + 2.44046880035743*m.b554 <= 3.53908108902554)
m.c1476 = Constraint(expr= m.x67 - m.x68 + 2.44046880035743*m.b555 <= 3.53908108902554)
m.c1477 = Constraint(expr= m.x68 - m.x69 + 2.44046880035743*m.b556 <= 3.53908108902554)
m.c1478 = Constraint(expr= m.x69 - m.x70 + 2.44046880035743*m.b557 <= 3.53908108902554)
m.c1479 = Constraint(expr= m.x70 - m.x71 + 2.44046880035743*m.b558 <= 3.53908108902554)
m.c1480 = Constraint(expr= m.x71 - m.x72 + 2.44046880035743*m.b559 <= 3.53908108902554)
# x73..x84 block, coefficient 3.25858237030459:
m.c1481 = Constraint(expr= m.x73 - m.x74 + 3.25858237030459*m.b549 <= 4.3571946589727)
m.c1482 = Constraint(expr= m.x74 - m.x75 + 3.25858237030459*m.b550 <= 4.3571946589727)
m.c1483 = Constraint(expr= m.x75 - m.x76 + 3.25858237030459*m.b551 <= 4.3571946589727)
m.c1484 = Constraint(expr= m.x76 - m.x77 + 3.25858237030459*m.b552 <= 4.3571946589727)
m.c1485 = Constraint(expr= m.x77 - m.x78 + 3.25858237030459*m.b553 <= 4.3571946589727)
m.c1486 = Constraint(expr= m.x78 - m.x79 + 3.25858237030459*m.b554 <= 4.3571946589727)
m.c1487 = Constraint(expr= m.x79 - m.x80 + 3.25858237030459*m.b555 <= 4.3571946589727)
m.c1488 = Constraint(expr= m.x80 - m.x81 + 3.25858237030459*m.b556 <= 4.3571946589727)
m.c1489 = Constraint(expr= m.x81 - m.x82 + 3.25858237030459*m.b557 <= 4.3571946589727)
m.c1490 = Constraint(expr= m.x82 - m.x83 + 3.25858237030459*m.b558 <= 4.3571946589727)
m.c1491 = Constraint(expr= m.x83 - m.x84 + 3.25858237030459*m.b559 <= 4.3571946589727)
# x85..x96 block, coefficient 2.35846029688671:
m.c1492 = Constraint(expr= m.x85 - m.x86 + 2.35846029688671*m.b549 <= 3.45707258555482)
m.c1493 = Constraint(expr= m.x86 - m.x87 + 2.35846029688671*m.b550 <= 3.45707258555482)
m.c1494 = Constraint(expr= m.x87 - m.x88 + 2.35846029688671*m.b551 <= 3.45707258555482)
m.c1495 = Constraint(expr= m.x88 - m.x89 + 2.35846029688671*m.b552 <= 3.45707258555482)
m.c1496 = Constraint(expr= m.x89 - m.x90 + 2.35846029688671*m.b553 <= 3.45707258555482)
m.c1497 = Constraint(expr= m.x90 - m.x91 + 2.35846029688671*m.b554 <= 3.45707258555482)
m.c1498 = Constraint(expr= m.x91 - m.x92 + 2.35846029688671*m.b555 <= 3.45707258555482)
m.c1499 = Constraint(expr= m.x92 - m.x93 + 2.35846029688671*m.b556 <= 3.45707258555482)
m.c1500 = Constraint(expr= m.x93 - m.x94 + 2.35846029688671*m.b557 <= 3.45707258555482)
m.c1501 = Constraint(expr= m.x94 - m.x95 + 2.35846029688671*m.b558 <= 3.45707258555482)
m.c1502 = Constraint(expr= m.x95 - m.x96 + 2.35846029688671*m.b559 <= 3.45707258555482)
# x97..x108 block, coefficient 2.3960170377866:
m.c1503 = Constraint(expr= m.x97 - m.x98 + 2.3960170377866*m.b549 <= 3.49462932645471)
m.c1504 = Constraint(expr= m.x98 - m.x99 + 2.3960170377866*m.b550 <= 3.49462932645471)
m.c1505 = Constraint(expr= m.x99 - m.x100 + 2.3960170377866*m.b551 <= 3.49462932645471)
m.c1506 = Constraint(expr= m.x100 - m.x101 + 2.3960170377866*m.b552 <= 3.49462932645471)
m.c1507 = Constraint(expr= m.x101 - m.x102 + 2.3960170377866*m.b553 <= 3.49462932645471)
m.c1508 = Constraint(expr= m.x102 - m.x103 + 2.3960170377866*m.b554 <= 3.49462932645471)
m.c1509 = Constraint(expr= m.x103 - m.x104 + 2.3960170377866*m.b555 <= 3.49462932645471)
m.c1510 = Constraint(expr= m.x104 - m.x105 + 2.3960170377866*m.b556 <= 3.49462932645471)
m.c1511 = Constraint(expr= m.x105 - m.x106 + 2.3960170377866*m.b557 <= 3.49462932645471)
m.c1512 = Constraint(expr= m.x106 - m.x107 + 2.3960170377866*m.b558 <= 3.49462932645471)
m.c1513 = Constraint(expr= m.x107 - m.x108 + 2.3960170377866*m.b559 <= 3.49462932645471)
# x109..x120 block, coefficient 2.28868218782505:
m.c1514 = Constraint(expr= m.x109 - m.x110 + 2.28868218782505*m.b549 <= 3.38729447649316)
m.c1515 = Constraint(expr= m.x110 - m.x111 + 2.28868218782505*m.b550 <= 3.38729447649316)
m.c1516 = Constraint(expr= m.x111 - m.x112 + 2.28868218782505*m.b551 <= 3.38729447649316)
m.c1517 = Constraint(expr= m.x112 - m.x113 + 2.28868218782505*m.b552 <= 3.38729447649316)
m.c1518 = Constraint(expr= m.x113 - m.x114 + 2.28868218782505*m.b553 <= 3.38729447649316)
m.c1519 = Constraint(expr= m.x114 - m.x115 + 2.28868218782505*m.b554 <= 3.38729447649316)
m.c1520 = Constraint(expr= m.x115 - m.x116 + 2.28868218782505*m.b555 <= 3.38729447649316)
m.c1521 = Constraint(expr= m.x116 - m.x117 + 2.28868218782505*m.b556 <= 3.38729447649316)
m.c1522 = Constraint(expr= m.x117 - m.x118 + 2.28868218782505*m.b557 <= 3.38729447649316)
m.c1523 = Constraint(expr= m.x118 - m.x119 + 2.28868218782505*m.b558 <= 3.38729447649316)
m.c1524 = Constraint(expr= m.x119 - m.x120 + 2.28868218782505*m.b559 <= 3.38729447649316)
# x121..x132 block, coefficient 2.32790290097834:
m.c1525 = Constraint(expr= m.x121 - m.x122 + 2.32790290097834*m.b549 <= 3.42651518964645)
m.c1526 = Constraint(expr= m.x122 - m.x123 + 2.32790290097834*m.b550 <= 3.42651518964645)
m.c1527 = Constraint(expr= m.x123 - m.x124 + 2.32790290097834*m.b551 <= 3.42651518964645)
m.c1528 = Constraint(expr= m.x124 - m.x125 + 2.32790290097834*m.b552 <= 3.42651518964645)
m.c1529 = Constraint(expr= m.x125 - m.x126 + 2.32790290097834*m.b553 <= 3.42651518964645)
m.c1530 = Constraint(expr= m.x126 - m.x127 + 2.32790290097834*m.b554 <= 3.42651518964645)
m.c1531 = Constraint(expr= m.x127 - m.x128 + 2.32790290097834*m.b555 <= 3.42651518964645)
m.c1532 = Constraint(expr= m.x128 - m.x129 + 2.32790290097834*m.b556 <= 3.42651518964645)
m.c1533 = Constraint(expr= m.x129 - m.x130 + 2.32790290097834*m.b557 <= 3.42651518964645)
m.c1534 = Constraint(expr= m.x130 - m.x131 + 2.32790290097834*m.b558 <= 3.42651518964645)
m.c1535 = Constraint(expr= m.x131 - m.x132 + 2.32790290097834*m.b559 <= 3.42651518964645)
# x133..x144 block, coefficient 2.17630492708372:
m.c1536 = Constraint(expr= m.x133 - m.x134 + 2.17630492708372*m.b549 <= 3.27491721575183)
m.c1537 = Constraint(expr= m.x134 - m.x135 + 2.17630492708372*m.b550 <= 3.27491721575183)
m.c1538 = Constraint(expr= m.x135 - m.x136 + 2.17630492708372*m.b551 <= 3.27491721575183)
m.c1539 = Constraint(expr= m.x136 - m.x137 + 2.17630492708372*m.b552 <= 3.27491721575183)
m.c1540 = Constraint(expr= m.x137 - m.x138 + 2.17630492708372*m.b553 <= 3.27491721575183)
m.c1541 = Constraint(expr= m.x138 - m.x139 + 2.17630492708372*m.b554 <= 3.27491721575183)
m.c1542 = Constraint(expr= m.x139 - m.x140 + 2.17630492708372*m.b555 <= 3.27491721575183)
m.c1543 = Constraint(expr= m.x140 - m.x141 + 2.17630492708372*m.b556 <= 3.27491721575183)
m.c1544 = Constraint(expr= m.x141 - m.x142 + 2.17630492708372*m.b557 <= 3.27491721575183)
m.c1545 = Constraint(expr= m.x142 - m.x143 + 2.17630492708372*m.b558 <= 3.27491721575183)
m.c1546 = Constraint(expr= m.x143 - m.x144 + 2.17630492708372*m.b559 <= 3.27491721575183)
# x145..x156 block, coefficient 2.08253202659828:
m.c1547 = Constraint(expr= m.x145 - m.x146 + 2.08253202659828*m.b549 <= 3.18114431526639)
m.c1548 = Constraint(expr= m.x146 - m.x147 + 2.08253202659828*m.b550 <= 3.18114431526639)
m.c1549 = Constraint(expr= m.x147 - m.x148 + 2.08253202659828*m.b551 <= 3.18114431526639)
m.c1550 = Constraint(expr= m.x148 - m.x149 + 2.08253202659828*m.b552 <= 3.18114431526639)
m.c1551 = Constraint(expr= m.x149 - m.x150 + 2.08253202659828*m.b553 <= 3.18114431526639)
m.c1552 = Constraint(expr= m.x150 - m.x151 + 2.08253202659828*m.b554 <= 3.18114431526639)
m.c1553 = Constraint(expr= m.x151 - m.x152 + 2.08253202659828*m.b555 <= 3.18114431526639)
m.c1554 = Constraint(expr= m.x152 - m.x153 + 2.08253202659828*m.b556 <= 3.18114431526639)
m.c1555 = Constraint(expr= m.x153 - m.x154 + 2.08253202659828*m.b557 <= 3.18114431526639)
m.c1556 = Constraint(expr= m.x154 - m.x155 + 2.08253202659828*m.b558 <= 3.18114431526639)
m.c1557 = Constraint(expr= m.x155 - m.x156 + 2.08253202659828*m.b559 <= 3.18114431526639)
# x157..x168 block, coefficient 1.88519141402061:
m.c1558 = Constraint(expr= m.x157 - m.x158 + 1.88519141402061*m.b549 <= 2.98380370268872)
m.c1559 = Constraint(expr= m.x158 - m.x159 + 1.88519141402061*m.b550 <= 2.98380370268872)
m.c1560 = Constraint(expr= m.x159 - m.x160 + 1.88519141402061*m.b551 <= 2.98380370268872)
m.c1561 = Constraint(expr= m.x160 - m.x161 + 1.88519141402061*m.b552 <= 2.98380370268872)
m.c1562 = Constraint(expr= m.x161 - m.x162 + 1.88519141402061*m.b553 <= 2.98380370268872)
m.c1563 = Constraint(expr= m.x162 - m.x163 + 1.88519141402061*m.b554 <= 2.98380370268872)
m.c1564 = Constraint(expr= m.x163 - m.x164 + 1.88519141402061*m.b555 <= 2.98380370268872)
m.c1565 = Constraint(expr= m.x164 - m.x165 + 1.88519141402061*m.b556 <= 2.98380370268872)
m.c1566 = Constraint(expr= m.x165 - m.x166 + 1.88519141402061*m.b557 <= 2.98380370268872)
m.c1567 = Constraint(expr= m.x166 - m.x167 + 1.88519141402061*m.b558 <= 2.98380370268872)
m.c1568 = Constraint(expr= m.x167 - m.x168 + 1.88519141402061*m.b559 <= 2.98380370268872)
# x169..x180 block, coefficient 2.45174823131027:
m.c1569 = Constraint(expr= m.x169 - m.x170 + 2.45174823131027*m.b549 <= 3.55036051997838)
m.c1570 = Constraint(expr= m.x170 - m.x171 + 2.45174823131027*m.b550 <= 3.55036051997838)
m.c1571 = Constraint(expr= m.x171 - m.x172 + 2.45174823131027*m.b551 <= 3.55036051997838)
m.c1572 = Constraint(expr= m.x172 - m.x173 + 2.45174823131027*m.b552 <= 3.55036051997838)
m.c1573 = Constraint(expr= m.x173 - m.x174 + 2.45174823131027*m.b553 <= 3.55036051997838)
m.c1574 = Constraint(expr= m.x174 - m.x175 + 2.45174823131027*m.b554 <= 3.55036051997838)
m.c1575 = Constraint(expr= m.x175 - m.x176 + 2.45174823131027*m.b555 <= 3.55036051997838)
m.c1576 = Constraint(expr= m.x176 - m.x177 + 2.45174823131027*m.b556 <= 3.55036051997838)
m.c1577 = Constraint(expr= m.x177 - m.x178 + 2.45174823131027*m.b557 <= 3.55036051997838)
m.c1578 = Constraint(expr= m.x178 - m.x179 + 2.45174823131027*m.b558 <= 3.55036051997838)
m.c1579 = Constraint(expr= m.x179 - m.x180 + 2.45174823131027*m.b559 <= 3.55036051997838)
# x181..x190 block, coefficient 2.83303050362969 (continues past this fragment):
m.c1580 = Constraint(expr= m.x181 - m.x182 + 2.83303050362969*m.b549 <= 3.9316427922978)
m.c1581 = Constraint(expr= m.x182 - m.x183 + 2.83303050362969*m.b550 <= 3.9316427922978)
m.c1582 = Constraint(expr= m.x183 - m.x184 + 2.83303050362969*m.b551 <= 3.9316427922978)
m.c1583 = Constraint(expr= m.x184 - m.x185 + 2.83303050362969*m.b552 <= 3.9316427922978)
m.c1584 = Constraint(expr= m.x185 - m.x186 + 2.83303050362969*m.b553 <= 3.9316427922978)
m.c1585 = Constraint(expr= m.x186 - m.x187 + 2.83303050362969*m.b554 <= 3.9316427922978)
m.c1586 = Constraint(expr= m.x187 - m.x188 + 2.83303050362969*m.b555 <= 3.9316427922978)
m.c1587 = Constraint(expr= m.x188 - m.x189 + 2.83303050362969*m.b556 <= 3.9316427922978)
m.c1588 = Constraint(expr= m.x189 - m.x190 + 2.83303050362969*m.b557 <= 3.9316427922978)
m.c1589 = Constraint(expr= m.x190 - | |
from models import DCGAN_64_Discriminator, DCGAN_64_Generator, StandardCNN_Discriminator, StandardCNN_Generator, InceptionV3
from torch.utils.data import Dataset as dst
from glob import glob
import torch
import torch.nn as nn
from torch.cuda import FloatTensor as Tensor
from torch import clamp
from torch.autograd import grad
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10
import matplotlib.pyplot as plt
import numpy as np
from torchvision.utils import save_image
import os
import sys
from PIL import Image
from argparse import ArgumentTypeError
import tarfile
import urllib
from scipy import linalg
try:
from tqdm import tqdm
except ImportError:
def tqdm(x): return x
class Dataset(dst):
    """Folder-backed image dataset for the 64x64 cat images.

    Collects every ``*.png`` and ``*.jpg`` directly under ``root`` (sorted,
    PNGs first) and applies ``transforms`` to each loaded image.  When
    ``root`` does not exist and ``download`` is True, the archive is fetched
    into ``datasets/`` and extracted there first.
    """

    def __init__(self, root, transforms, download):
        if not os.path.exists(root) and download:
            # Bug fix: `import urllib` alone does not bind the `request`
            # submodule, so `urllib.request.urlretrieve` raised
            # AttributeError here; import it explicitly.
            import urllib.request
            print("Cat dataset is not found, it will be downloaded to datasets/cats_64. (24 MB)")
            cats_64_url = "https://drive.google.com/uc?id=19vLd3nuT3amuW4xlN7kTXoSYqWZlRRqx&export=download"
            # Make sure the download target directory exists.
            os.makedirs("datasets", exist_ok=True)
            urllib.request.urlretrieve(cats_64_url, os.path.join("datasets", "cats.tar.gz"))
            print("Cat dataset is downloaded, extracting...")
            # NOTE(review): extractall trusts archive member paths; fine for
            # this known archive, unsafe for untrusted input.
            with tarfile.open(os.path.join("datasets", "cats.tar.gz")) as cats_tar:
                cats_tar.extractall("datasets")
            print("Extraction successful!")
        self.files = sorted(glob(root + '/*.png')) + sorted(glob(root + '/*.jpg'))
        self.transforms = transforms

    def __getitem__(self, index):
        # Lazily open and transform a single image on access.
        return self.transforms(Image.open(self.files[index]))

    def __len__(self):
        return len(self.files)
def get_model(args):
    """
    Return the (generator, discriminator) pair for the requested architecture.

    StandardCNN is the architecture described in appendix I.1 of the paper,
    DCGAN_64 the one in appendix I.2.  The ``no_BN``, ``all_tanh`` and
    ``spec_norm`` flags are forwarded to the constructors and both networks
    are moved to ``args.device``.

    :raises ValueError: if ``args.model`` is not a known architecture name.
    """
    if args.model == "standard_cnn":
        return (StandardCNN_Generator(no_BN=args.no_BN, all_tanh=args.all_tanh).to(args.device),
                StandardCNN_Discriminator(no_BN=args.no_BN, all_tanh=args.all_tanh, spec_norm=args.spec_norm).to(args.device))
    if args.model == "dcgan_64":
        return (DCGAN_64_Generator(no_BN=args.no_BN, all_tanh=args.all_tanh).to(args.device),
                DCGAN_64_Discriminator(no_BN=args.no_BN, all_tanh=args.all_tanh, spec_norm=args.spec_norm).to(args.device))
    # Previously an unknown name silently returned None; fail loudly instead.
    raise ValueError(f"unknown model architecture: {args.model!r}")
def get_loss(loss_type):
    """
    Return ``(gen_loss, disc_loss)`` closures for the given loss type.

    Relativistic generator losses ("rsgan", "rsgan-gp") also take the
    discriminator output for the real samples.  Relativistic *average* losses
    ("rasgan", "ralsgan", "rahingegan", "rasgan-gp") additionally take the
    averaged discriminator outputs over the real and fake batches.  For the
    "-gp" variants a pre-calculated gradient penalty term is passed in and
    added to the discriminator loss.

    :raises ValueError: if ``loss_type`` is not one of the supported names.
    """
    if loss_type == "sgan":
        loss = nn.BCEWithLogitsLoss()

        def disc_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss(C_real, ones) + loss(C_fake, zeros))

        def gen_loss(C_fake):
            ones = torch.ones_like(C_fake)
            return loss(C_fake, ones)
    elif loss_type == "rsgan":
        loss = nn.BCEWithLogitsLoss()

        def disc_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            return loss((C_real - C_fake), ones)

        def gen_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            return loss((C_fake - C_real), ones)
    elif loss_type == "rasgan":
        loss = nn.BCEWithLogitsLoss()

        def disc_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss((C_real - C_avg_fake), ones) + loss((C_fake - C_avg_real), zeros))

        def gen_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss((C_real - C_avg_fake), zeros) + loss((C_fake - C_avg_real), ones))
    elif loss_type == "lsgan":
        loss = nn.MSELoss()

        # NOTE(review): real->0 / fake->1 targets are inverted relative to the
        # usual LSGAN labeling, but the generator target matches the same
        # convention, so the objective is equivalent under MSE; kept as-is.
        def disc_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss(C_real, zeros) + loss(C_fake, ones))

        def gen_loss(C_fake):
            zeros = torch.zeros_like(C_fake)
            return loss(C_fake, zeros)
    elif loss_type == "ralsgan":
        loss = nn.MSELoss()

        def disc_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            return (loss((C_real - C_avg_fake), ones) + loss((C_fake - C_avg_real), -ones))

        def gen_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            return (loss((C_fake - C_avg_real), ones) + loss((C_real - C_avg_fake), -ones))
    elif loss_type == "hingegan":
        def disc_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            return (clamp((ones - C_real), min=0).mean() + clamp((C_fake + ones), min=0).mean())

        def gen_loss(C_fake):
            return -C_fake.mean()
    elif loss_type == "rahingegan":
        def disc_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            return (clamp((ones - C_real + C_avg_fake), min=0).mean() + clamp((ones + C_fake - C_avg_real), min=0).mean())

        def gen_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            return (clamp((ones - C_fake + C_avg_real), min=0).mean() + clamp((ones + C_real - C_avg_fake), min=0).mean())
    elif loss_type == "wgan-gp":
        def disc_loss(C_real, C_fake, grad_pen):
            return (-C_real.mean() + C_fake.mean() + grad_pen)

        def gen_loss(C_fake):
            return -C_fake.mean()
    elif loss_type == "rsgan-gp":
        loss = nn.BCEWithLogitsLoss()

        def disc_loss(C_real, C_fake, grad_pen):
            ones = torch.ones_like(C_fake)
            return (loss((C_real - C_fake), ones) + grad_pen)

        def gen_loss(C_real, C_fake):
            ones = torch.ones_like(C_fake)
            return loss((C_fake - C_real), ones)
    elif loss_type == "rasgan-gp":
        loss = nn.BCEWithLogitsLoss()

        def disc_loss(C_real, C_fake, C_avg_real, C_avg_fake, grad_pen):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss((C_real - C_avg_fake), ones) + loss((C_fake - C_avg_real), zeros) + grad_pen)

        def gen_loss(C_real, C_fake, C_avg_real, C_avg_fake):
            ones = torch.ones_like(C_fake)
            zeros = torch.zeros_like(C_fake)
            return (loss((C_real - C_avg_fake), zeros) + loss((C_fake - C_avg_real), ones))
    else:
        # Bug fix: an unrecognized name previously fell through to the return
        # statement and crashed with NameError; report the problem explicitly.
        raise ValueError(f"unknown loss type: {loss_type!r}")
    return gen_loss, disc_loss
def grad_penalty(discriminator, x_hat, Lambda):
    """
    Compute the gradient penalty for the interpolated batch ``x_hat``.

    ``Lambda`` scales the penalty.  ``create_graph=True`` keeps the gradient
    graph so the penalty itself can be back-propagated through.
    """
    x_hat.requires_grad_(True)
    critic_scores = discriminator(x_hat)
    gradients = grad(
        outputs=critic_scores,
        inputs=x_hat,
        grad_outputs=torch.ones_like(critic_scores),
        create_graph=True,
    )[0]
    per_sample_norms = gradients.view(x_hat.size(0), -1).norm(p=2, dim=1)
    return Lambda * ((per_sample_norms - 1) ** 2).mean()
def get_dataset(dataset):
    """
    Return the Dataset object of the given dataset, "cifar10" or "cat".

    For "cifar10", torchvision.datasets.CIFAR10 is used; it downloads the
    data automatically when missing.  For "cat", the images under
    "./datasets/cats_64" are used and downloaded automatically if absent.
    Images are converted to tensors and normalized to [-1, 1] per channel.

    :raises ValueError: if ``dataset`` is not a known dataset name.
    """
    # Validate first (was: unknown names silently returned None).
    if dataset not in ("cifar10", "cat"):
        raise ValueError(f"unknown dataset: {dataset!r}")
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    if dataset == "cifar10":
        return CIFAR10(root='./datasets', train=True,
                       download=True, transform=transform)
    return Dataset(root=os.path.join("datasets", "cats_64"), transforms=transform, download=True)
def cycle(iterable, dset, device):
    """
    Endlessly iterate over a dataloader, restarting it once exhausted.

    Yields each batch moved to ``device``.  For the "cifar10" dataset the
    loader yields ``[images, labels]`` pairs, so only ``x[0]`` (the images)
    is forwarded.
    """
    take_first = (dset == 'cifar10')
    while True:
        for batch in iterable:
            payload = batch[0] if take_first else batch
            yield payload.to(device)
def is_negative(value):
    """
    argparse ``type=`` helper: parse ``value`` as a non-negative int.

    Raises ArgumentTypeError for negative values so argparse reports a clean
    usage error.  Used for the iteration-frequency command-line arguments.
    """
    ivalue = int(value)  # convert once instead of twice
    if ivalue < 0:
        raise ArgumentTypeError(f"{value} should be non-negative")
    return ivalue
def sample_fid(generator, it, args, batch_size=100):
    """
    Generate samples for FID computation and save them as a compressed
    ``.npz`` archive under ``samples/``.

    ``args.fid_sample`` images are produced in batches, mapped from [-1, 1]
    to [0, 255], rounded, and stored as uint8 with layout (N, H, W, C).
    The generator is put in eval mode for sampling and restored to train
    mode afterwards.
    """
    generator.eval()
    # Collect per-batch arrays and concatenate once at the end: the original
    # concatenated inside the loop, which is O(n^2) in total copied data.
    chunks = []
    with torch.no_grad():
        for i in range(0, args.fid_sample, batch_size):
            sys.stdout.write(f"\rGenerating {i}/{args.fid_sample}")
            if args.fid_sample < batch_size + i:
                batch_size = args.fid_sample - i  # shrink the final batch
            generated_samples = (generator(torch.randn(size=(batch_size, 128, 1, 1), device=args.device)) + 1) * 127.5
            # np.round: np.round_ was removed in NumPy 2.0.
            chunks.append(np.round(generated_samples.cpu().permute(0, 2, 3, 1).numpy()).astype(np.uint8))
    arr = np.concatenate(chunks, axis=0)
    np.savez_compressed(
        f"samples/{args.dataset}_{args.loss_type}_n_d_{args.d_iter}_b1_{args.beta1}_b2_{args.beta2}_b_size_{args.batch_size}_lr_{args.lr}_{it+1}"
        + ("_noBN" if args.no_BN else "")
        + ("_alltanh" if args.all_tanh else ""),
        images=arr)
    generator.train()
def extract_statistics(path, model, batch_size, use_cuda, verbose = False):
"""
Computes and returns the mean and covariance matrix of the InceptionV3 features of the image dataset at the given path.
Arguments:
path: Dataset path.
model: The model for feature extraction (should be instance of models.InceptionV3)
batch_size: Batch size to be used for feature extraction.
use_cuda: Boolean variable, use CUDA if True.
verbose: Boolean variable, display progress if True.
Return Values:
mu: Mean of the extracted features.
sigma: Covariance matrix of the extracted features.
"""
model.eval()
# Load the data.
images = np.load(path)["images"]
# Check for possible errors due to batch size \ number of samples
if batch_size > len(images):
print("Warning: Batch size larger than number of samples when computing FIDs!")
batch_size = len(images)
remainder = len(images) % batch_size
# Get the number of batches
number_of_batches = len(images) // batch_size
# Define the array of feature vectors
features = np.empty(shape = (len(images), 2048))
if verbose:
print("Computing InceptionV3 activations...")
for i in tqdm(range(number_of_batches)) if verbose else range(number_of_batches):
# Get current batch
batch = images[i * batch_size:(i + 1) * batch_size].astype(np.float32)
# Reshape to (N, C, H, W)
batch = batch.transpose(0, 3, 1, 2)
# Scale down to [0, 1]
batch /= 255
# Convert batch to Tensor and load it to the selected device
batch = torch.from_numpy(batch).type(torch.FloatTensor)
if use_cuda:
batch = batch.cuda()
# Compute InceptionV3 features
batch_of_features = model(batch)
# Apply adaptive avg pooling to decrease number of features from
# 8 x 8 x 2048 to 1 x 1 x 2048 as per instructions from the original paper
batch_of_features = nn.functional.adaptive_avg_pool2d(batch_of_features, output_size = (1,1))
# Append batch of features to the feature list (after removing unnecessary dimensions).
batch_of_features = batch_of_features.cpu().data.numpy().reshape(batch_size, 2048)
features[i * batch_size:(i+1) * batch_size] = batch_of_features
# If the numnber of samples is not a multiple of batch size, handle remanining examples.
if remainder != 0:
i += 1
# reshape and rescale
batch = images[i * batch_size:].astype(np.float32)
batch = batch.transpose(0, 3, 1, 2)
batch /= 255
# convert to tensor
batch = torch.from_numpy(batch).type(torch.FloatTensor)
if use_cuda:
batch = batch.cuda()
# process and save
batch = nn.functional.adaptive_avg_pool2d(model(batch), output_size = (1,1))
batch = batch.cpu().data.numpy().reshape(remainder, 2048)
features[i * batch_size:] = batch
if verbose:
print("InceptionV3 activations computed succesfully!")
# Compute feature statistics
| |
#!/usr/bin/env python3
from contextlib import closing
import argparse
import multiprocessing
import os
import os.path
import re
import socket
import subprocess
import sys
import urllib.parse
import urllib3
import bs4
import dulwich.index
import dulwich.objects
import dulwich.pack
import requests
import socks
def printf(fmt, *args, file=sys.stdout):
    """printf-style helper: %-format ``fmt`` with ``args``, write, and flush."""
    text = fmt % args if args else fmt
    file.write(text)
    file.flush()
def is_html(response):
    """Return True if the response body looks like an HTML webpage."""
    body = response.text
    return body.find('<html>') != -1
def get_indexed_files(response):
    """Return the file names listed in a directory-index HTML page."""
    page = bs4.BeautifulSoup(response.text, 'html.parser')
    names = []
    for anchor in page.find_all('a'):
        target = urllib.parse.urlparse(anchor.get('href'))
        # keep only plain relative entries (no scheme/host, not ./.. or absolute)
        is_relative_entry = (
            target.path and
            target.path not in ('.', '..') and
            not target.path.startswith('/') and
            not target.scheme and
            not target.netloc
        )
        if is_relative_entry:
            names.append(target.path)
    return names
def create_intermediate_dirs(path):
    """Create the parent directories of ``path`` if they do not exist yet."""
    dirname = os.path.dirname(path)  # the original also split off an unused basename
    if dirname:
        # exist_ok=True handles the create/exists race the original caught
        # by hand with try/except FileExistsError.
        os.makedirs(dirname, exist_ok=True)
def get_referenced_sha1(obj_file):
    """Return every SHA1 referenced by the given git object file."""
    referenced = []
    if isinstance(obj_file, dulwich.objects.Commit):
        # a commit points at its tree and all of its parents
        referenced.append(obj_file.tree.decode())
        referenced.extend(parent.decode() for parent in obj_file.parents)
    elif isinstance(obj_file, dulwich.objects.Tree):
        referenced.extend(item.sha.decode() for item in obj_file.iteritems())
    elif isinstance(obj_file, dulwich.objects.Blob):
        pass  # blobs reference nothing
    else:
        printf('error: unexpected object type: %r\n' % obj_file, file=sys.stderr)
        sys.exit(1)
    return referenced
class Worker(multiprocessing.Process):
    """Daemon process that pulls tasks from a shared queue until it sees None.

    Subclasses implement ``init`` (per-process setup) and ``do_task``
    (handle one task, return the list of follow-up tasks).
    """

    def __init__(self, pending_tasks, tasks_done, args):
        super().__init__()
        self.daemon = True
        self.pending_tasks = pending_tasks
        self.tasks_done = tasks_done
        self.args = args

    def run(self):
        # per-process setup
        self.init(*self.args)
        # keep pulling tasks until the None sentinel arrives
        for task in iter(lambda: self.pending_tasks.get(block=True), None):
            result = self.do_task(task, *self.args)
            assert isinstance(result, list), 'do_task() should return a list of tasks'
            self.tasks_done.put(result)

    def init(self, *args):
        raise NotImplementedError

    def do_task(self, task, *args):
        raise NotImplementedError
def process_tasks(initial_tasks, worker, jobs, args=(), tasks_done=None):
    ''' Process tasks in parallel '''
    # `worker` is a Worker subclass; `jobs` processes are spawned and fed via a
    # shared queue.  `tasks_done` optionally pre-seeds the set of tasks that
    # should be treated as already handled (e.g. objects found in packs).
    if not initial_tasks:
        return
    # every task ever enqueued -- prevents duplicates from being re-processed
    tasks_seen = set(tasks_done) if tasks_done else set()
    pending_tasks = multiprocessing.Queue()
    tasks_done = multiprocessing.Queue()
    # count of enqueued-but-unfinished tasks; drives the collection loop below
    num_pending_tasks = 0
    # add all initial tasks in the queue
    for task in initial_tasks:
        assert task is not None
        if task not in tasks_seen:
            pending_tasks.put(task)
            num_pending_tasks += 1
            tasks_seen.add(task)
    # initialize processes
    processes = [worker(pending_tasks, tasks_done, args) for _ in range(jobs)]
    # launch them all
    for p in processes:
        p.start()
    # collect task results; each result may spawn new follow-up tasks
    while num_pending_tasks > 0:
        task_result = tasks_done.get(block=True)
        num_pending_tasks -= 1
        for task in task_result:
            assert task is not None
            if task not in tasks_seen:
                pending_tasks.put(task)
                num_pending_tasks += 1
                tasks_seen.add(task)
    # send termination signal (task=None), one sentinel per worker
    for _ in range(jobs):
        pending_tasks.put(None)
    # join all
    for p in processes:
        p.join()
class DownloadWorker(Worker):
    """Worker that downloads a fixed list of files, one per task."""

    def init(self, url, directory, retry, timeout):
        # One HTTP session per process: TLS verification off, retries mounted
        # for the target base URL.
        self.session = requests.Session()
        self.session.verify = False
        self.session.mount(url, requests.adapters.HTTPAdapter(max_retries=retry))

    def do_task(self, filepath, url, directory, retry, timeout):
        local_path = os.path.join(directory, filepath)
        if os.path.isfile(local_path):
            printf('[-] Already downloaded %s/%s\n', url, filepath)
            return []
        response = self.session.get('%s/%s' % (url, filepath),
                                    allow_redirects=False,
                                    stream=True,
                                    timeout=timeout)
        with closing(response):
            printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)
            if response.status_code != 200:
                return []
            abspath = os.path.abspath(local_path)
            create_intermediate_dirs(abspath)
            # stream the body to disk in 4 KiB chunks
            with open(abspath, 'wb') as output:
                for chunk in response.iter_content(4096):
                    output.write(chunk)
        return []
class RecursiveDownloadWorker(DownloadWorker):
    ''' Download a directory recursively '''

    def do_task(self, filepath, url, directory, retry, timeout):
        # Returns follow-up paths: `filepath + '/'` when the server redirects
        # to the trailing-slash form (it is a directory), the index entries
        # when `filepath` is a directory listing page, or [] otherwise.
        if os.path.isfile(os.path.join(directory, filepath)):
            printf('[-] Already downloaded %s/%s\n', url, filepath)
            return []
        with closing(self.session.get('%s/%s' % (url, filepath),
                                      allow_redirects=False,
                                      stream=True,
                                      timeout=timeout)) as response:
            printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)
            if (response.status_code in (301, 302) and
                    'Location' in response.headers and
                    response.headers['Location'].endswith(filepath + '/')):
                # redirected to the directory form: re-queue with the slash
                return [filepath + '/']
            if response.status_code != 200:
                return []
            if filepath.endswith('/'):  # directory index
                assert is_html(response)
                return [filepath + filename for filename in get_indexed_files(response)]
            else:  # file
                abspath = os.path.abspath(os.path.join(directory, filepath))
                create_intermediate_dirs(abspath)
                # write file
                with open(abspath, 'wb') as f:
                    for chunk in response.iter_content(4096):
                        f.write(chunk)
                return []
class FindRefsWorker(DownloadWorker):
    """Worker that downloads a ref file and queues every ref mentioned in it."""

    def do_task(self, filepath, url, directory, retry, timeout):
        response = self.session.get('%s/%s' % (url, filepath),
                                    allow_redirects=False,
                                    timeout=timeout)
        printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)
        if response.status_code != 200:
            return []
        abspath = os.path.abspath(os.path.join(directory, filepath))
        create_intermediate_dirs(abspath)
        with open(abspath, 'w') as output:
            output.write(response.text)
        # every ref named in the file spawns two follow-up downloads:
        # the ref itself and its reflog
        followups = []
        for match in re.findall(r'(refs(/[a-zA-Z0-9\-\.\_\*]+)+)', response.text):
            refname = match[0]
            if refname.endswith('*'):
                continue  # skip glob patterns
            followups.append('.git/%s' % refname)
            followups.append('.git/logs/%s' % refname)
        return followups
class FindObjectsWorker(DownloadWorker):
    """Worker that downloads a loose object and queues the SHA1s it references."""

    def do_task(self, obj, url, directory, retry, timeout):
        """Fetch the loose object ``obj`` (40-hex SHA1) unless already present,
        then parse it and return the referenced SHA1s as follow-up tasks."""
        filepath = '.git/objects/%s/%s' % (obj[:2], obj[2:])
        # Compute the local path once (the original recomputed it after the
        # if/else even though the else branch had already built it).
        abspath = os.path.abspath(os.path.join(directory, filepath))
        if os.path.isfile(abspath):
            printf('[-] Already downloaded %s/%s\n', url, filepath)
        else:
            response = self.session.get('%s/%s' % (url, filepath),
                                        allow_redirects=False,
                                        timeout=timeout)
            printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)
            if response.status_code != 200:
                return []
            create_intermediate_dirs(abspath)
            # write file
            with open(abspath, 'wb') as f:
                f.write(response.content)
        # parse object file to find other objects
        obj_file = dulwich.objects.ShaFile.from_path(abspath)
        return get_referenced_sha1(obj_file)
def fetch_git(url, directory, jobs, retry, timeout):
    ''' Dump a git repository into the output directory '''
    # Returns a shell-style exit code: 0 on success, 1 on failure.
    assert os.path.isdir(directory), '%s is not a directory' % directory
    assert jobs >= 1, 'invalid number of jobs'
    assert retry >= 1, 'invalid number of retries'
    assert timeout >= 1, 'invalid timeout'
    if os.listdir(directory):
        printf("Warning: Destination '%s' is not empty\n", directory)
    # find base url: strip trailing slashes and any 'HEAD' or '.git' suffix
    url = url.rstrip('/')
    if url.endswith('HEAD'):
        url = url[:-4]
    url = url.rstrip('/')
    if url.endswith('.git'):
        url = url[:-4]
    url = url.rstrip('/')
    # check for /.git/HEAD
    printf('[-] Testing %s/.git/HEAD ', url)
    response = requests.get('%s/.git/HEAD' % url, verify=False, allow_redirects=False)
    printf('[%d]\n', response.status_code)
    if response.status_code != 200:
        printf('error: %s/.git/HEAD does not exist\n', url, file=sys.stderr)
        return 1
    elif not response.text.startswith('ref:'):
        printf('error: %s/.git/HEAD is not a git HEAD file\n', url, file=sys.stderr)
        return 1
    # check for directory listing; if enabled we can simply mirror .git/ recursively
    printf('[-] Testing %s/.git/ ', url)
    response = requests.get('%s/.git/' % url, verify=False, allow_redirects=False)
    printf('[%d]\n', response.status_code)
    if response.status_code == 200 and is_html(response) and 'HEAD' in get_indexed_files(response):
        printf('[-] Fetching .git recursively\n')
        process_tasks(['.git/', '.gitignore'],
                      RecursiveDownloadWorker,
                      jobs,
                      args=(url, directory, retry, timeout))
        printf('[-] Running git checkout .\n')
        os.chdir(directory)
        subprocess.check_call(['git', 'checkout', '.'])
        return 0
    # no directory listing: fall back to fetching well-known file names
    printf('[-] Fetching common files\n')
    tasks = [
        '.gitignore',
        '.git/COMMIT_EDITMSG',
        '.git/description',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/commit-msg.sample',
        '.git/hooks/post-commit.sample',
        '.git/hooks/post-receive.sample',
        '.git/hooks/post-update.sample',
        '.git/hooks/pre-applypatch.sample',
        '.git/hooks/pre-commit.sample',
        '.git/hooks/pre-push.sample',
        '.git/hooks/pre-rebase.sample',
        '.git/hooks/pre-receive.sample',
        '.git/hooks/prepare-commit-msg.sample',
        '.git/hooks/update.sample',
        '.git/index',
        '.git/info/exclude',
        '.git/objects/info/packs',
    ]
    process_tasks(tasks,
                  DownloadWorker,
                  jobs,
                  args=(url, directory, retry, timeout))
    # find refs
    printf('[-] Finding refs/\n')
    tasks = [
        '.git/FETCH_HEAD',
        '.git/HEAD',
        '.git/ORIG_HEAD',
        '.git/config',
        '.git/info/refs',
        '.git/logs/HEAD',
        '.git/logs/refs/heads/master',
        '.git/logs/refs/remotes/origin/HEAD',
        '.git/logs/refs/remotes/origin/master',
        '.git/logs/refs/stash',
        '.git/packed-refs',
        '.git/refs/heads/master',
        '.git/refs/remotes/origin/HEAD',
        '.git/refs/remotes/origin/master',
        '.git/refs/stash',
        '.git/refs/wip/wtree/refs/heads/master', #Magit
        '.git/refs/wip/index/refs/heads/master' #Magit
    ]
    process_tasks(tasks,
                  FindRefsWorker,
                  jobs,
                  args=(url, directory, retry, timeout))
    # find packs
    printf('[-] Finding packs\n')
    tasks = []
    # use .git/objects/info/packs to find packs
    info_packs_path = os.path.join(directory, '.git', 'objects', 'info', 'packs')
    if os.path.exists(info_packs_path):
        with open(info_packs_path, 'r') as f:
            info_packs = f.read()
        for sha1 in re.findall(r'pack-([a-f0-9]{40})\.pack', info_packs):
            tasks.append('.git/objects/pack/pack-%s.idx' % sha1)
            tasks.append('.git/objects/pack/pack-%s.pack' % sha1)
    process_tasks(tasks,
                  DownloadWorker,
                  jobs,
                  args=(url, directory, retry, timeout))
    # find objects
    printf('[-] Finding objects\n')
    objs = set()
    packed_objs = set()
    # .git/packed-refs, .git/info/refs, .git/refs/*, .git/logs/* contain
    # plain-text SHA1s that can be harvested with a regex
    files = [
        os.path.join(directory, '.git', 'packed-refs'),
        os.path.join(directory, '.git', 'info', 'refs'),
        os.path.join(directory, '.git', 'FETCH_HEAD'),
        os.path.join(directory, '.git', 'ORIG_HEAD'),
    ]
    for dirpath, _, filenames in os.walk(os.path.join(directory, '.git', 'refs')):
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))
    for dirpath, _, filenames in os.walk(os.path.join(directory, '.git', 'logs')):
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))
    for filepath in files:
        if not os.path.exists(filepath):
            continue
        with open(filepath, 'r') as f:
            content = f.read()
        for obj in re.findall(r'(^|\s)([a-f0-9]{40})($|\s)', content):
            obj = obj[1]
            objs.add(obj)
    # use .git/index to find objects
    index_path = os.path.join(directory, '.git', 'index')
    if os.path.exists(index_path):
        index = dulwich.index.Index(index_path)
        for entry in index.iterblobs():
            objs.add(entry[1].decode())
    # use packs to find more objects to fetch, and objects that are packed
    # (packed objects need no individual download, hence tasks_done below)
    pack_file_dir = os.path.join(directory, '.git', 'objects', 'pack')
    if os.path.isdir(pack_file_dir):
        for filename in os.listdir(pack_file_dir):
            if filename.startswith('pack-') and filename.endswith('.pack'):
                pack_data_path = os.path.join(pack_file_dir, filename)
                pack_idx_path = os.path.join(pack_file_dir, filename[:-5] + '.idx')
                pack_data = dulwich.pack.PackData(pack_data_path)
                pack_idx = dulwich.pack.load_pack_index(pack_idx_path)
                pack = dulwich.pack.Pack.from_objects(pack_data, pack_idx)
                for obj_file in pack.iterobjects():
                    packed_objs.add(obj_file.sha().hexdigest())
                    objs |= set(get_referenced_sha1(obj_file))
    # fetch all objects
    printf('[-] Fetching objects\n')
    process_tasks(objs,
                  FindObjectsWorker,
                  jobs,
                  args=(url, directory, retry, timeout),
                  tasks_done=packed_objs)
    # git checkout
    printf('[-] Running git checkout .\n')
    os.chdir(directory)
    # ignore errors
    subprocess.call(['git', 'checkout', '.'], stderr=open(os.devnull, 'wb'))
    return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage='%(prog)s [options] URL DIR',
description='Dump a git repository from a website.')
parser.add_argument('url', metavar='URL',
help='url')
parser.add_argument('directory', metavar='DIR',
help='output directory')
parser.add_argument('--proxy',
help='use the specified proxy')
parser.add_argument('-j', '--jobs', type=int, default=10,
help='number of simultaneous requests')
parser.add_argument('-r', '--retry', type=int, default=3,
help='number of request attempts before giving up')
parser.add_argument('-t', '--timeout', type=int, default=3,
help='maximum time in seconds before giving up')
args = parser.parse_args()
# jobs
| |
= metadata_service_pb2.NameRequest(name=model_name)
response = self.metadata_store_stub.getModelByName(request)
return _unwrap_model_response(response)
def register_model(self, model_name, project_id, model_desc=None) -> ModelMeta:
    """
    Register a model in the metadata store.
    :param model_name: Name of registered model
    :param project_id: Project id which registered model corresponded to.
    :param model_desc: Description of registered model
    :return: A single :py:class:`ai_flow.meta.model_meta.ModelMeta` object.
    """
    model_proto = ModelProto(name=model_name,
                             model_desc=stringValue(model_desc),
                             project_id=int64Value(project_id))
    response = self.metadata_store_stub.registerModel(
        metadata_service_pb2.RegisterModelRequest(model=model_proto))
    return _unwrap_model_response(response)
def delete_model_by_id(self, model_id) -> Status:
    """
    Delete a registered model by its id.
    :param model_id: Id of registered model
    :return: Status.OK if registered model is successfully deleted,
             Status.ERROR if registered model does not exist otherwise.
    """
    id_request = metadata_service_pb2.IdRequest(id=model_id)
    return _unwrap_delete_response(self.metadata_store_stub.deleteModelById(id_request))
def delete_model_by_name(self, model_name) -> Status:
    """
    Delete a registered model by its name.
    :param model_name: Name of registered model
    :return: Status.OK if registered model is successfully deleted,
             Status.ERROR if registered model does not exist otherwise.
    """
    name_request = metadata_service_pb2.NameRequest(name=model_name)
    return _unwrap_delete_response(self.metadata_store_stub.deleteModelByName(name_request))
'''model version relation api'''
def get_model_version_relation_by_version(self, version, model_id) -> Optional[ModelVersionRelationMeta]:
    """
    Look up a model version relation by its version name.
    :param version: the model version name
    :param model_id: the model id corresponded to the model version
    :return: the matching :py:class:`ai_flow.meta.model_relation_meta.ModelVersionRelationMeta`
             object, or None if the model version does not exist.
    """
    name_request = metadata_service_pb2.ModelVersionNameRequest(name=version, model_id=model_id)
    response = self.metadata_store_stub.getModelVersionRelationByVersion(name_request)
    return _unwrap_model_version_relation_response(response)
def register_model_version_relation(self, version, model_id,
                                    project_snapshot_id=None) -> ModelVersionRelationMeta:
    """
    Register a model version relation in the metadata store.
    :param version: the specific model version
    :param model_id: the model id corresponded to the model version
    :param project_snapshot_id: the project snapshot id corresponded to the model version
    :return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelVersionRelationMeta` object.
    """
    relation_proto = ModelVersionRelationProto(
        version=int32Value(version),
        model_id=int64Value(model_id),
        project_snapshot_id=int64Value(project_snapshot_id))
    response = self.metadata_store_stub.registerModelVersionRelation(
        metadata_service_pb2.RegisterModelVersionRelationRequest(model_version_relation=relation_proto))
    return _unwrap_model_version_relation_response(response)
def list_model_version_relation(self, model_id, page_size, offset) -> List[ModelVersionRelationMeta]:
    """
    List registered model version relations in the metadata store.
    :param model_id: the model id corresponded to the model version
    :param page_size: the limitation of the listed model version relations.
    :param offset: the offset of listed model version relations.
    :return: List of :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` objects,
             return None if no model version relations to be listed.
    """
    list_request = metadata_service_pb2.ListModelVersionRelationRequest(
        model_id=model_id, page_size=page_size, offset=offset)
    response = self.metadata_store_stub.listModelVersionRelation(list_request)
    return _unwrap_model_version_relation_list_response(response)
def delete_model_version_relation_by_version(self, version, model_id) -> Status:
    """
    Delete the registered model version relation identified by its version name.
    :param version: the model version name
    :param model_id: the model id corresponded to the model version
    :return: Status.OK if the model version is successfully deleted,
             Status.ERROR if the model version does not exist otherwise.
    """
    name_request = metadata_service_pb2.ModelVersionNameRequest(name=version, model_id=model_id)
    return _unwrap_delete_response(
        self.metadata_store_stub.deleteModelVersionRelationByVersion(name_request))
'''model version api'''
def get_model_version_by_version(self, version, model_id) -> Optional[ModelVersionMeta]:
    """
    Fetch a specific model version by its version name.
    :param version: User-defined version of registered model
    :param model_id: The model id corresponded to the model version
    :return: the matching :py:class:`ai_flow.meta.model_meta.ModelVersionMeta` object,
             or None if the model version does not exist.
    """
    name_request = metadata_service_pb2.ModelVersionNameRequest(name=version, model_id=model_id)
    return _unwrap_model_version_response(
        self.metadata_store_stub.getModelVersionByVersion(name_request))
def register_model_version(self, model, model_path, project_snapshot_id=None,
                           model_type=None, version_desc=None,
                           current_stage=ModelVersionStage.GENERATED) -> ModelVersionMeta:
    """
    register a model version in metadata store.
    :param model: Model id or model meta of registered model corresponded to model version
    :param model_path: Source path where the AIFlow model is stored.
    :param project_snapshot_id: Id of project snapshot corresponded to model version
    :param model_type: (Optional) Type of AIFlow registered model option.
    :param version_desc: (Optional) Description of registered model version.
    :param current_stage: (Optional) Stage of registered model version
    :return: A single :py:class:`ai_flow.meta.model_meta.ModelVersionMeta` object.
    :raises Exception: if ``model`` is neither an int id nor a ModelMeta.
    """
    # Accept either a numeric model id or a full ModelMeta object.
    if isinstance(model, int):
        model_id = model
    elif isinstance(model, ModelMeta):
        model_id = model.uuid
    else:
        raise Exception("can not recognize model {}".format(model))
    # version is sent as None -- presumably assigned by the metadata
    # service on registration; TODO confirm
    model_version = ModelVersionProto(version=None,
                                      model_id=int64Value(model_id),
                                      project_snapshot_id=int64Value(project_snapshot_id),
                                      model_path=stringValue(model_path),
                                      model_type=stringValue(model_type),
                                      version_desc=stringValue(version_desc),
                                      current_stage=current_stage)
    request = metadata_service_pb2.RegisterModelVersionRequest(model_version=model_version)
    response = self.metadata_store_stub.registerModelVersion(request)
    model_version_meta = _unwrap_model_version_response(response)
    # When a notification client is configured, announce the new version;
    # the event type is derived from the version's current stage.
    notification_client = get_notification_client()
    if notification_client is not None:
        event_type = MODEL_VERSION_TO_EVENT_TYPE.get(ModelVersionStage.Value(model_version_meta.current_stage))
        model_meta = self.get_model_by_id(model_version_meta.model_id)
        model_version_detail = ModelVersionDetail(model_name=model_meta.name,
                                                  model_version=model_version_meta.version,
                                                  model_path=model_version_meta.model_path,
                                                  model_type=model_version_meta.model_type,
                                                  version_desc=model_version_meta.version_desc,
                                                  current_stage=ModelVersionStage.Value(model_version_meta.current_stage)
                                                  )
        # event payload is the detail object serialized as JSON
        notification_client.send_event(BaseEvent(model_version_detail.model_name,
                                                 json.dumps(model_version_detail.__dict__),
                                                 event_type))
    return model_version_meta
def delete_model_version_by_version(self, version, model_id) -> Status:
    """Delete a registered model version identified by its version name.

    :param version: Name of the model version to delete.
    :param model_id: Id of the model that owns the version.
    :return: ``Status.OK`` when the deletion succeeds, ``Status.ERROR``
        when the model version does not exist.
    """
    req = metadata_service_pb2.ModelVersionNameRequest(name=version, model_id=model_id)
    resp = self.metadata_store_stub.deleteModelVersionByVersion(req)
    client = get_notification_client()
    if client is not None:
        # NOTE(review): the MODEL_DELETED event is emitted regardless of
        # whether the server actually deleted the version — confirm intended.
        model_meta = self.get_model_by_id(model_id)
        deleted_version = ModelVersion(model_name=model_meta.name,
                                       model_version=version)
        client.send_event(BaseEvent(model_meta.name,
                                    json.dumps(deleted_version.__dict__),
                                    ModelVersionEventType.MODEL_DELETED))
    return _unwrap_delete_response(resp)
def get_deployed_model_version(self, model_name) -> ModelVersionMeta:
    """Return the currently deployed model version of the named model.

    :param model_name: Name of the registered model.
    :return: The deployed :py:class:`ai_flow.meta.model_meta.ModelVersionMeta`.
    """
    request = ModelNameRequest(name=model_name)
    response = self.metadata_store_stub.getDeployedModelVersion(request)
    return _unwrap_model_version_response(response)
def get_latest_validated_model_version(self, model_name) -> ModelVersionMeta:
    """Return the latest validated model version of the named model.

    :param model_name: Name of the registered model.
    :return: The latest validated
        :py:class:`ai_flow.meta.model_meta.ModelVersionMeta`.
    """
    request = ModelNameRequest(name=model_name)
    response = self.metadata_store_stub.getLatestValidatedModelVersion(request)
    return _unwrap_model_version_response(response)
def get_latest_generated_model_version(self, model_name) -> ModelVersionMeta:
    """Return the latest generated model version of the named model.

    :param model_name: Name of the registered model.
    :return: The latest generated
        :py:class:`ai_flow.meta.model_meta.ModelVersionMeta`.
    """
    request = ModelNameRequest(name=model_name)
    response = self.metadata_store_stub.getLatestGeneratedModelVersion(request)
    return _unwrap_model_version_response(response)
'''project api'''
def get_project_by_id(self, project_id) -> Optional[ProjectMeta]:
    """Look up a project in the metadata store by id.

    :param project_id: Id of the project.
    :return: The matching :py:class:`ai_flow.meta.project.ProjectMeta`,
        or ``None`` when no such project exists.
    """
    resp = self.metadata_store_stub.getProjectById(
        metadata_service_pb2.IdRequest(id=project_id))
    return _unwrap_project_response(resp)
def get_project_by_name(self, project_name) -> Optional[ProjectMeta]:
    """Look up a project in the metadata store by name.

    :param project_name: Name of the project.
    :return: The matching :py:class:`ai_flow.meta.project.ProjectMeta`,
        or ``None`` when no such project exists.
    """
    resp = self.metadata_store_stub.getProjectByName(
        metadata_service_pb2.NameRequest(name=project_name))
    return _unwrap_project_response(resp)
def register_project(self, name, uri: Text = None, properties: Properties = None) -> ProjectMeta:
    """Register a new project in the metadata store.

    :param name: Name of the project.
    :param uri: Optional uri of the project.
    :param properties: Optional properties of the project.
    :return: The created :py:class:`ai_flow.meta.project.ProjectMeta`.
    """
    proto = ProjectProto(name=name, uri=stringValue(uri), properties=properties)
    resp = self.metadata_store_stub.registerProject(
        metadata_service_pb2.RegisterProjectRequest(project=proto))
    return _unwrap_project_response(resp)
def update_project(self, project_name: Text, uri: Text = None, properties: Properties = None) -> Optional[ProjectMeta]:
    """Update an existing project's uri and/or properties.

    :param project_name: Name of the project to update.
    :param uri: New uri of the project, if any.
    :param properties: New properties of the project, if any.
    :return: The updated :py:class:`ai_flow.meta.project.ProjectMeta`
        on success.
    """
    req = metadata_service_pb2.UpdateProjectRequest(
        name=project_name, uri=stringValue(uri), properties=properties)
    return _unwrap_project_response(self.metadata_store_stub.updateProject(req))
def list_projects(self, page_size, offset) -> Optional[List[ProjectMeta]]:
    """List registered projects in the metadata store.

    :param page_size: Maximum number of projects to return.
    :param offset: Number of projects to skip.
    :return: A list of :py:class:`ai_flow.meta.project_meta.ProjectMeta`
        objects, or ``None`` when there are no projects to list.
    """
    req = metadata_service_pb2.ListRequest(page_size=page_size, offset=offset)
    return _unwrap_project_list_response(self.metadata_store_stub.listProject(req))
def delete_project_by_id(self, project_id) -> Status:
    """Delete a registered project by id.

    :param project_id: Id of the project.
    :return: ``Status.OK`` on success, ``Status.ERROR`` when the project
        does not exist.
    """
    req = metadata_service_pb2.IdRequest(id=project_id)
    return _unwrap_delete_response(self.metadata_store_stub.deleteProjectById(req))
def delete_project_by_name(self, project_name) -> Status:
    """Delete a registered project by name.

    :param project_name: Name of the project.
    :return: ``Status.OK`` on success, ``Status.ERROR`` when the project
        does not exist.
    """
    req = metadata_service_pb2.NameRequest(name=project_name)
    return _unwrap_delete_response(self.metadata_store_stub.deleteProjectByName(req))
'''artifact api'''
def get_artifact_by_id(self, artifact_id) -> Optional[ArtifactMeta]:
    """Look up an artifact in the metadata store by id.

    :param artifact_id: Id of the artifact.
    :return: The matching :py:class:`ai_flow.meta.artifact_meta.ArtifactMeta`,
        or ``None`` when no such artifact exists.
    """
    resp = self.metadata_store_stub.getArtifactById(
        metadata_service_pb2.IdRequest(id=artifact_id))
    return _unwrap_artifact_response(resp)
def get_artifact_by_name(self, artifact_name) -> Optional[ArtifactMeta]:
    """Look up an artifact in the metadata store by name.

    :param artifact_name: Name of the artifact.
    :return: The matching :py:class:`ai_flow.meta.artifact_meta.ArtifactMeta`,
        or ``None`` when no such artifact exists.
    """
    resp = self.metadata_store_stub.getArtifactByName(
        metadata_service_pb2.NameRequest(name=artifact_name))
    return _unwrap_artifact_response(resp)
def register_artifact(self, name: Text, artifact_type: Text = None, description: Text = None,
uri: Text = None, properties: Properties = None) -> ArtifactMeta:
"""
register an artifact in metadata store.
:param name: the name of the artifact
:param artifact_type: the type of the artifact
:param description: the description of the artifact
:param uri: the uri of the artifact
:param properties: the properties of the artifact
:return: A single :py:class:`ai_flow.meta.artifact_meta.py.ArtifactMeta` object.
"""
artifact_request = ArtifactProto(name=name, artifact_type=stringValue(artifact_type),
description=stringValue(description), uri=stringValue(uri),
properties=properties)
request = | |
#!/usr/local/bin/python
"""
BSD 2-Clause License:
Copyright (c) 2013, iXSystems Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import csv
import os
######################################################
# Name: transform_sysctl.py
# Author: <NAME>
# Purpose: Transform logs into CSV format, and generate an R graph from the data
# Date: 03/26/2013
# Update: 04/01/2013 added scipen option to disable scientific notation
# Update: 04/01/2013 Rotated Y axis labels, removed Y axis title
# Update: 04/02/2013 Yanked R string, and put in source file, gets called from system shell.
# Update: 04/02/2013 Added cli option "--rgraph" generates R graphs, default is no graph generation.
# Update: 04/03/2013 Added blacklist, automatically reads from blacklist.txt if file exists
# Update: 04/03/2013 Added logic to filter out unchanging sysctls
# Update: 04/12/2013 Added iostat output to transform correctly
# Update: 04/14/2013 Added function to convert human readable outputs to machine readable
# Note: This program requires R installed. FreeBSD 9.1 doesn't have packages for it, so you have to build from ports
#
# Note: Some values from sysctls make the R graphs fail; I've observed p1003_1b.delaytimer_max not generating a graph
# because R complains about the ylim. Not sure why yet, but <NAME> had a similar problem. Can swing back
# later and diagnose.
#------------------------------
# Usage: (make sure script is made executable with chmod +x transpose_sysctl.py)
# ./transpose_sysctl sysctl?????.txt
#
# defaults with no graphing, should generate CSV file for each sysctl, with time stamp and
# value like this: Date,sysctlname
# (timestamp),1
# ...
#
# Option: --rgraph (Generates a Graph using R)
# example: ./transpose_sysctl sysctl??.txt --rgraph
#
# Creates a .PNG file of the sysctl value over time.
# --------------------------------------------------
# Dependencies:
#
# FreeBSD 9 or up: Should work on prior versions, but developed on 9 and tested
#
# capture_config.sh: Script file should be included with this, does the actual capture of data.
#
# R: R port or package, available in ports in: /usr/ports/math/R . R has a bunch of dependencies,
# if you can use the package go for it)
#
# data: You should have some pre-made data files created with the "capture_config.sh" script.
# You should see files created like sysctl_vm_1_sec.txt, or sysctl_all_1_sec.txt
#
# Full stack usage:
# ------------------
# 1.) Edit the capture_config.sh file with the appropriate NFS mount point and directory you want the data stored.
# 2.) run: % ./capture_config.sh
# 3.) Let it run over the time you want to capture data. (run in screens, tmux or background to make sure you don't lose your session)
# 4.) ctrl-C the program when you're done. (Note: double check that all the netstat and iostat programs are killed off, just in case.)
# 5.) run transpose_sysctl.py
#
# Example run: % ./transpose_sysctl.py sysctl_all_1_sec.txt --rgraph
#
# If all goes right, this will post process your data, in to .PNG graphs.
#####################################################
# Todo:
# a.) diff inputs? reduce redundant reads. Helps with graphing, but could miss stable behavior?
#===========================================================================================================================
blacklist = [] #Empty Global blacklist. Add control name as string for hardcoded values, also reads blacklist_sysctl.txt if it exists
# Unit-suffix tables used by human2bytes(). Each tuple is ordered so that the
# symbol at index n maps to a multiplier of 2**(10*n).
# NOTE(review): the trailing 'M' and 'G' in 'customary' sit at positions 9 and
# 10 and therefore map to 2**90 / 2**100, not mega/giga — confirm intended.
SYMBOLS = {
    'customary' : ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB','M','G'),
    'customary_ext' : ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                       'zetta', 'iotta'),
    'iec' : ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext' : ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                 'zebi', 'yobi'),
}
def human2bytes(s):
    """
    Attempts to guess the string format based on the default SYMBOLS
    sets and return the corresponding number of bytes as an integer.
    When unable to recognize the format, ValueError is raised.
    >>> human2bytes('34GB')
    36507222016
    """
    s = s.strip()
    if s.isdigit():
        # Plain number with no unit suffix. int() auto-promotes to arbitrary
        # precision on both Python 2 and 3 (long() was Python 2-only).
        return int(s)
    init = s
    num = ""
    # Consume the leading numeric portion (digits and a decimal point).
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # Lowercase 'k' is a common alias for kilobytes. Map it onto the
            # 'KB' table entry; the previous letter.upper() produced 'K',
            # which is not in the table and raised KeyError.
            sset = SYMBOLS['customary']
            letter = 'KB'
        else:
            raise ValueError("can't interpret %r" % init)
    # Build the multiplier table: sset[n] -> 2**(10*n).
    prefix = {sset[0]: 1}
    for i, sym in enumerate(sset[1:]):
        prefix[sym] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
def gen_R_graph(sysctl):
    # Render a .PNG time-series graph for one sysctl by shelling out to the
    # plot_csv.R helper script. Requires R (Rscript) to be installed; only
    # invoked when the --rgraph command-line option was given.
    print "Generating R graph for: " + sysctl
    os.system("Rscript --no-save --slave plot_csv.R " + sysctl)
def parse_line(inline):
    # Parse one log line of the form "date|name:value|name:value|...\n"
    # into a dict mapping 'Date' plus each non-blacklisted sysctl name to
    # its machine-readable (byte) value.
    fields = inline.split('|')
    data = dict()
    data['Date'] = fields.pop(0)
    for field in fields:
        if field == '\n':
            continue  # skip the trailing newline token
        parts = field.split(':')
        if parts[0] not in blacklist:
            data[parts[0]] = human2bytes(parts[1])
    return data
def parse_keys(data):
    """Return the record's column names (its dict keys) as a list."""
    # Iterating the dict directly yields its keys, replacing the
    # Python 2-only data.iteritems() loop that collected keys by hand.
    # New column names appearing mid-file are reconciled by the caller.
    return list(data)
def has_duplicates(d):
    # True when at least two keys map to the same value. (Left unused by
    # the pipeline: identical consecutive samples can legitimately occur.)
    vals = list(d.values())
    return len(set(vals)) != len(vals)
def main():
if len(sys.argv) == 1:
print "Error: Missing filename"
exit(1)
rgraph = False
if len(sys.argv) > 2:
if sys.argv[2] == "--rgraph":
print "R graph option enabled."
rgraph = True
PurgeDups = True # Flag to eliminate duplicate readings for sysclts. Default is to purge.
if len(sys.argv) > 3:
if sys.argv[3] == "--all":
print "Make CSV and graphs for all sysctl's."
PurgeDups = False
filename = sys.argv[1]
print "Reading file: " + filename + " ..."
records = list()
f=0
try:
f =open(filename, "r") # Open file
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
exit(2)
except:
print "Unexpected error:", sys.exc_info()[0]
exit(3)
# Option to ignore static values
# Read in blacklist if file exists
if os.path.exists("blacklist.txt"): #If the blackfile exists
print "Skipping controls in Blacklist: " + str(blacklist)
with open("blacklist.txt") as b: #Open the file with CSV module
for row in csv.reader(b): # Read each row into row
if row: # Skip row if it's empty
blacklist.append(row.pop()) # Get first element from list and append to blacklist
b.close()
# Alls good, continue on, come back later and deal with read/write errors.
line = f.readline() # Get first line
#print line
first_record=parse_line(line) # Get First line
#Ignore logfile rotation
if "turned over" in line:
line = f.readline() # Get first line
first_record=parse_line(line) # Pet First line
keys=parse_keys(first_record) # Grab Header/keys
print "Number of sysctls: " + str(len(keys))
#records.append(first_record) # Add first record
print "Loading SYSCLT data..."
linecount=0
for line in iter(f): # Read rest of file
record = parse_line(line)
if "turned over" in line:
continue
compare_keys = list(set(parse_keys(record)) - set(keys))
if compare_keys:
print "New headers detected, adding.."
keys=parse_keys(record)
records.append(record)
#print "Records: " + str(records)
linecount += 1 # Increment
print " lines \r",linecount,
f.close()
print "Number of samples: " + str(len(records))
#Go through list of dictionaries, and remove entries that all have identical values..
#Compare all samples with first, with prior string and only add if delta exists anywhere, ignoring timestamp of course.
newkeys = list()
unique = 0
if PurgeDups:
# Records = list | |
STORE_ATTR _original_interaction_target_changed
244_0 COME_FROM 216 '216'
244_1 COME_FROM 210 '210'
244_2 COME_FROM 198 '198'
244_3 COME_FROM 172 '172'
L.1270 244 LOAD_FAST 'moved_social_group'
246_248 POP_JUMP_IF_TRUE 300 'to 300'
250 LOAD_FAST 'derailed_reason'
252 LOAD_GLOBAL MOVING_DERAILS
254 COMPARE_OP in
256_258 POP_JUMP_IF_FALSE 300 'to 300'
L.1273 260 LOAD_FAST 'self'
262 LOAD_ATTR interaction
264 LOAD_ATTR is_social
266_268 POP_JUMP_IF_FALSE 300 'to 300'
270 LOAD_FAST 'self'
272 LOAD_ATTR interaction
274 LOAD_ATTR social_group
276 LOAD_CONST None
278 COMPARE_OP is-not
280_282 POP_JUMP_IF_FALSE 300 'to 300'
L.1274 284 LOAD_FAST 'self'
286 LOAD_ATTR interaction
288 LOAD_ATTR social_group
290 LOAD_METHOD refresh_social_geometry
292 CALL_METHOD_0 0 '0 positional arguments'
294 POP_TOP
L.1275 296 LOAD_CONST True
298 STORE_FAST 'moved_social_group'
300_0 COME_FROM 280 '280'
300_1 COME_FROM 266 '266'
300_2 COME_FROM 256 '256'
300_3 COME_FROM 246 '246'
L.1277 300 LOAD_GLOBAL DerailReason
302 LOAD_ATTR NOT_DERAILED
304 LOAD_FAST 'self'
306 LOAD_ATTR _derailed
308 LOAD_FAST 'sim'
310 STORE_SUBSCR
L.1280 312 LOAD_FAST 'derailed_reason'
314 LOAD_GLOBAL DerailReason
316 LOAD_ATTR NAVMESH_UPDATED_BY_BUILD
318 COMPARE_OP ==
320 POP_JUMP_IF_FALSE 22 'to 22'
L.1281 322 LOAD_FAST 'sim'
324 LOAD_METHOD get_location_on_nearest_surface_below
326 CALL_METHOD_0 0 '0 positional arguments'
328 UNPACK_SEQUENCE_2 2
330 STORE_FAST 'location'
332 STORE_FAST '_'
L.1282 334 LOAD_FAST 'sim'
336 LOAD_METHOD validate_location
338 LOAD_FAST 'location'
340 CALL_METHOD_1 1 '1 positional argument'
342 POP_JUMP_IF_TRUE 22 'to 22'
L.1283 344 LOAD_FAST 'sim'
346 LOAD_ATTR schedule_reset_asap
348 LOAD_GLOBAL ResetReason
350 LOAD_ATTR RESET_EXPECTED
L.1284 352 LOAD_FAST 'self'
354 LOAD_STR 'Sim is in invalid location during transition.'
356 LOAD_CONST ('reset_reason', 'source', 'cause')
358 CALL_FUNCTION_KW_3 3 '3 total positional and keyword args'
360 POP_TOP
362 JUMP_BACK 22 'to 22'
364 POP_BLOCK
366_0 COME_FROM_LOOP 8 '8'
L.1286 366 SETUP_LOOP 396 'to 396'
368 LOAD_FAST 'sims_to_reset'
370 GET_ITER
372 FOR_ITER 394 'to 394'
374 STORE_FAST 'sim'
L.1287 376 LOAD_FAST 'self'
378 LOAD_METHOD set_sim_progress
380 LOAD_FAST 'sim'
382 LOAD_GLOBAL TransitionSequenceStage
384 LOAD_ATTR EMPTY
386 CALL_METHOD_2 2 '2 positional arguments'
388 POP_TOP
390_392 JUMP_BACK 372 'to 372'
394 POP_BLOCK
396_0 COME_FROM_LOOP 366 '366'
L.1289 396 LOAD_FAST 'sims_to_reset'
398_400 POP_JUMP_IF_FALSE 422 'to 422'
L.1290 402 LOAD_FAST 'self'
404 LOAD_ATTR interaction
406 LOAD_METHOD refresh_constraints
408 CALL_METHOD_0 0 '0 positional arguments'
410 POP_TOP
L.1291 412 LOAD_FAST 'self'
414 LOAD_METHOD release_stand_slot_reservations
416 LOAD_FAST 'sims_to_reset'
418 CALL_METHOD_1 1 '1 positional argument'
420 POP_TOP
422_0 COME_FROM 398 '398'
L.1293 422 LOAD_CONST False
424 LOAD_FAST 'self'
426 STORE_ATTR _has_tried_bring_group_along
Parse error at or near `LOAD_FAST' instruction at offset 164
def _validate_transitions(self):
    # Cancel the whole sequence if any Sim ended up without a usable
    # path spec (missing or the shared EMPTY_PATH_SPEC sentinel).
    empty_spec = postures.posture_graph.EMPTY_PATH_SPEC
    for data in self._sim_data.values():
        if data.path_spec is None or data.path_spec is empty_spec:
            self.cancel()
def end_transition(self):
    """Detach every included SI from this sequence and clean up path specs."""
    if self._sim_data is not None:
        for data in self._sim_data.values():
            sis = data.constraint[1]
            if sis is None:
                continue
            for si in sis:
                si.transition = None
                si.owning_transition_sequences.discard(self)
    self._clear_target_interaction()
    if self._sim_data is not None:
        for sim, data in self._sim_data.items():
            if data.path_spec is not None:
                data.path_spec.cleanup_path_spec(sim)
def shutdown(self):
    """Tear down the sequence: drop callbacks, detach Sims, leave the zone registry."""
    self._clear_relevant_objects()
    self._clear_target_location_changed_callbacks()
    if self._sim_data is not None:
        for sim in self._sim_data:
            self._clear_owned_transition(sim)
            group = sim.get_main_group()
            if group is not None:
                # Refresh social geometry only when the Sim did not end up
                # at its intended transform ('a or b' in the decompiled
                # original means 'if not a: b').
                if not sims4.math.transform_almost_equal((sim.intended_transform), (sim.transform), epsilon=(sims4.geometry.ANIMATION_SLOT_EPSILON)):
                    group.refresh_social_geometry(sim=sim)
    if self._success or self.canceled:
        self.reset_all_progress()
    self.cancel_incompatible_sis_given_final_posture_states()
    services.current_zone().all_transition_controllers.discard(self)
def cancel_incompatible_sis_given_final_posture_states(self):
    # NOTE(review): decompiled artifact — this unconditional return makes
    # everything below it unreachable. The original source was most likely
    # a guard such as "if interaction is None or not
    # interaction.cancel_incompatible_with_posture_on_transition_shutdown:
    # return"; verify against the original before relying on this method.
    interaction = self.interaction
    return interaction is None or interaction.cancel_incompatible_with_posture_on_transition_shutdown or None
    # Unreachable as written: intended to cancel SIs that no longer fit
    # each Sim's final transform.
    cancel_reason_msg = "Incompatible with Sim's final transform."
    for sim in self.get_transitioning_sims():
        sim.evaluate_si_state_and_cancel_incompatible(FinishingType.INTERACTION_INCOMPATIBILITY, cancel_reason_msg)
def _clear_cancel_by_posture_change(self):
    # Re-enable posture-change cancelation on every SI that was pinned
    # for the duration of the transition.
    for data in self._sim_data.values():
        for si in data.final_included_sis or ():
            si.disable_cancel_by_posture_change = False
def _clear_owned_transition(self, sim):
    # Remove this sequence from the ownership sets of every SI tracked
    # for the given Sim (both final included SIs and constraint SIs).
    data = self._sim_data.get(sim)
    for si in data.final_included_sis or ():
        si.owning_transition_sequences.discard(self)
    for si in data.constraint[1] or ():
        si.owning_transition_sequences.discard(self)
def _get_carry_transference_work(self):
    """Build the carry-transfer animation work bracketing the transition.

    Returns a pair ``(work_begin, work_end)`` of ``defaultdict(list)``
    mapping each Sim to critical sections that end in-flight carry
    transfers before the transition and restart them afterwards.
    """
    work_begin = collections.defaultdict(list)
    # End any in-flight carry-transfer animations before the transition.
    for sim in self._sim_data:
        for si in sim.si_state:
            if si._carry_transfer_animation is None:
                continue
            element = si.get_carry_transfer_end_element()
            work_begin[si.sim].append(build_critical_section(element, flush_all_animations))
    # Collect the SIs whose carry transfers must restart afterwards.
    transfer_sis = set()
    for data in self._sim_data.values():
        templates = data.templates[1]
        if templates:
            transfer_sis.update(templates.keys())
        carry_si = data.templates[2]
        if carry_si is not None:
            transfer_sis.add(carry_si)
    transfer_sis.discard(self.interaction)
    work_end = collections.defaultdict(list)
    for si in transfer_sis:
        if si._carry_transfer_animation is None:
            continue
        element = si.get_carry_transfer_begin_element()
        work_end[si.sim].append(build_critical_section(element, flush_all_animations))
    return (work_begin, work_end)
def _get_animation_work(self, animation):
    # Pair the interaction's animation element with an animation flush.
    element = animation((self._interaction), sequence=())
    return (element, flush_all_animations)
def get_final_included_sis_for_sim(self, sim):
    """Return the final included SIs recorded for sim, or None if untracked."""
    if sim in self._sim_data:
        return self._sim_data[sim].final_included_sis
def get_tried_dest_nodes_for_sim(self, sim):
    # Destination nodes already tried for this Sim during this sequence.
    return self._tried_destinations[sim]
def get_sims_in_sim_data(self):
    # Returns the per-Sim data mapping itself; iterating it yields the Sims.
    return self._sim_data
def compute_transition_connectivity(self):
    """Run the sequence up to the CONNECTIVITY stage and return its result.

    run_transitions is a generator; at the CONNECTIVITY stage it must finish
    without yielding, so its return value arrives via StopIteration.value.
    """
    gen = self.run_transitions(None, progress_max=(TransitionSequenceStage.CONNECTIVITY))
    try:
        next(gen)
    except StopIteration as exc:
        return exc.value
    else:
        logger.error('run_transitions yielded when computing connectivity.')
def run_transitions(self, timeline, progress_max=TransitionSequenceStage.COMPLETE):
    """Drive this transition sequence up to progress_max.

    Generator: yields while transition work (path building, animation
    elements) runs on *timeline*; its boolean success result is delivered
    via the generator's return value (StopIteration.value).

    :param timeline: Timeline on which transition elements are scheduled;
        may be None when only connectivity is computed.
    :param progress_max: Highest TransitionSequenceStage to advance to.

    NOTE(review): this body comes from a whitespace-stripped decompiled
    dump; the block nesting below was reconstructed from control flow and
    should be verified against the original source.
    """
    logger.debug('{}: Running.', self)
    callback_utils.invoke_callbacks(callback_utils.CallbackEvent.TRANSITION_SEQUENCE_ENTER)
    try:
        try:
            self._running = True
            self._progress_max = progress_max
            self.reset_derailed_transitions()
            self._add_interaction_target_location_changed_callback()
            # Early out: if every transitioning Sim already reached
            # progress_max there is nothing left to do.
            for required_sim in self.get_transitioning_sims():
                sim_data = self._sim_data.get(required_sim)
                if sim_data is None or sim_data.progress < progress_max:
                    break
            else:
                return True
            sim = self.interaction.get_participant(ParticipantType.Actor)
            services.current_zone().all_transition_controllers.add(self)
            # Build the transition plan unless only partial progress was
            # requested or transitions are disabled for this interaction.
            if not (progress_max < TransitionSequenceStage.COMPLETE or self.interaction.disable_transitions):
                yield from self._build_transitions(timeline)
                if self.any_derailed:
                    return False
            if progress_max < TransitionSequenceStage.COMPLETE:
                services.current_zone().all_transition_controllers.discard(self)
                return True
            if self.interaction.disable_transitions:
                result = yield from self.run_super_interaction(timeline, self.interaction)
                return result
            self._validate_transitions()
            target_si, test_result = self.interaction.get_target_si()
            if not test_result:
                self.cancel((FinishingType.FAILED_TESTS), test_result=test_result)
            if self.canceled:
                # Surface failure feedback (balloon/FX) before giving up.
                failure_reason, failure_target = self.get_failure_reason_and_target(sim)
                if failure_reason is not None or failure_target is not None:
                    yield from self._do(timeline, sim, handle_transition_failure(sim, (self.interaction.target), (self.interaction), failure_reason=failure_reason,
                      failure_object_id=failure_target))
                return False
            if target_si is not None:
                if target_si.set_as_added_to_queue():
                    target_si.transition = self
                    self._target_interaction = target_si
            # Pin final included SIs so posture changes cannot cancel them
            # mid-transition (undone later in _clear_cancel_by_posture_change).
            for sim_data in self._sim_data.values():
                if sim_data.final_included_sis:
                    for si in sim_data.final_included_sis:
                        si.disable_cancel_by_posture_change = True
            carry_transference_work_begin, carry_transference_work_end = self._get_carry_transference_work()
            if carry_transference_work_begin:
                yield from self._do_must(timeline, self.sim, do_all(thread_element_map=carry_transference_work_begin))
            # Run the actual per-Sim transition work.
            self._worker_all_element = elements.AllElement([build_element(self._create_next_elements)])
            result = yield from self._do(timeline, None, self._worker_all_element)
            if carry_transference_work_end:
                yield from self._do_must(timeline, self.sim, do_all(thread_element_map=carry_transference_work_end))
            if progress_max == TransitionSequenceStage.COMPLETE:
                # Wait out Sims that derailed us by blocking the route.
                blocked_sims = set()
                for blocked_sim, reason in self._derailed.items():
                    if reason == DerailReason.WAIT_FOR_BLOCKING_SIMS:
                        blocked_sims.add(blocked_sim)
                if blocked_sims:
                    yield from self._wait_for_violators(timeline, blocked_sims)
            if not self._success:
                if self._transition_canceled:
                    self.cancel()
                if self.canceled or self.is_derailed(self._interaction.sim):
                    result = False
                if result:
                    # Any remaining queued transition means we are not done.
                    for _, transition in self.get_transitions_gen():
                        if transition:
                            result = False
                            break
                if not self._shortest_path_success[sim]:
                    derail_reason = self._derailed.get(sim)
                    if derail_reason != DerailReason.WAIT_TO_BE_PUT_DOWN:
                        self.cancel()
                        return False
                if result:
                    self._success = True
            if not self.interaction.active:
                if not self.interaction.is_finishing:
                    should_replace_posture_source = SuperInteraction.should_replace_posture_source_interaction(self.interaction)
                    would_replace_nonfinishing = should_replace_posture_source and not self.sim.posture.source_interaction.is_finishing
                    # NOTE(review): 'sim_data' below is the leftover value
                    # from the earlier loop over self._sim_data.values() —
                    # confirm this is intended.
                    if would_replace_nonfinishing:
                        # Decompiler artifact: 'a or b' means 'if not a: b'.
                        self.interaction.is_cancel_aop or self.sim.posture.source_interaction.merge(self.interaction)
                        self.interaction.cancel(FinishingType.TRANSITION_FAILURE, 'Transition Sequence. Replace posture source non-finishing.')
                    else:
                        if len(sim_data.path_spec.transition_specs) == 1:
                            sim_data.path_spec.transition_specs[0].do_reservation(self.sim) or self.sim.posture.source_interaction.merge(self.interaction)
                            self.interaction.cancel(FinishingType.TRANSITION_FAILURE, 'Transition Sequence. Reservation failed.')
                        else:
                            self.interaction.apply_posture_state(self.interaction.sim.posture_state)
                            result = yield from self.run_super_interaction(timeline, self.interaction)
        except:
            # Log, clear outstanding per-Sim work, and re-raise.
            logger.debug('{} RAISED EXCEPTION.', self)
            self._exited_due_to_exception = True
            for sim in self._sim_jobs:
                logger.warn('Terminating transition for Sim {}', sim)
            for sim in self._sim_idles:
                logger.warn('Terminating transition idle for Sim {}', sim)
            self._sim_jobs.clear()
            self._sim_idles.clear()
            raise
    finally:
        if self._transition_canceled:
            self.cancel()
        logger.debug('{} DONE.', self)
        self._clear_cancel_by_posture_change()
        if progress_max == TransitionSequenceStage.COMPLETE:
            # Collect Sims (and carried Sims) whose intended location changed.
            sims_to_update_intended_location = set()
            for sim in self.get_transitioning_sims():
                if not sims4.math.transform_almost_equal((sim.intended_transform), (sim.transform), epsilon=(sims4.geometry.ANIMATION_SLOT_EPSILON)):
                    sims_to_update_intended_location.add(sim)
                    for _, _, carry_object in get_carried_objects_gen(sim):
                        if carry_object.is_sim:
                            sims_to_update_intended_location.add(carry_object)
            self.shutdown()
            if not (hasattr(self.interaction, 'suppress_transition_ops_after_death') and self.interaction.suppress_transition_ops_after_death):
                for sim in sims_to_update_intended_location:
                    sim.routing_component.on_intended_location_changed(sim.intended_location)
            if not self.any_derailed:
                self.cancel_incompatible_sis_given_final_posture_states()
        callback_utils.invoke_callbacks(callback_utils.CallbackEvent.TRANSITION_SEQUENCE_EXIT)
        if not self._success:
            if self._interaction.must_run:
                for sim in self.get_transitioning_sims():
                    if self.is_derailed(sim):
                        break
                else:
                    logger.warn('Failed to plan a must run interaction {}', (self.interaction), owner='tastle')
                    for sim in self.get_transitioning_sims():
                        # NOTE(review): resets self.sim on every iteration;
                        # 'sim.reset(...)' looks like the intent — confirm.
                        self.sim.reset(ResetReason.RESET_EXPECTED, self, 'Failed to plan must run.')
        self._worker_all_element = None
        self._running = False
        if self._sim_jobs:
            raise AssertionError('Transition Sequence: Attempted to exit when there were still existing jobs. [tastle]')
    return self._success
    if False:
        yield None
@staticmethod
def choose_hand_and_filter_specs(sim, posture_specs_and_vars, carry_target, used_hand_and_target=None):
new_specs_and_vars = []
already_matched = set()
used_hand = None
used_hand_target = None
left_carry_target = sim.posture_state.left.target
right_carry_target = sim.posture_state.right.target
chosen_hand = None
if left_carry_target == carry_target and carry_target is not None:
chosen_hand = Hand.LEFT
else:
if right_carry_target == carry_target and carry_target is not None:
chosen_hand = Hand.RIGHT
else:
if left_carry_target is None and right_carry_target is not None:
chosen_hand = Hand.LEFT
else:
if right_carry_target is None:
if left_carry_target is not None:
chosen_hand = Hand.RIGHT
else:
if used_hand_and_target is not None:
used_hand, used_hand_target = used_hand_and_target
if carry_target is used_hand_target:
chosen_hand = used_hand
else:
chosen_hand = Hand.LEFT if used_hand != Hand.LEFT else Hand.RIGHT
elif carry_target is not None:
allowed_hands = carry_target.get_allowed_hands(sim)
if len(allowed_hands) == 1:
chosen_hand = allowed_hands[0]
elif chosen_hand is None:
allowed_hands = set()
for _, posture_spec_vars, _ in posture_specs_and_vars:
required_hand = posture_spec_vars.get(PostureSpecVariable.HAND)
if required_hand is not None:
allowed_hands.add(required_hand)
if used_hand is not None:
allowed_hands.discard(used_hand)
else:
preferred_hand = sim.get_preferred_hand()
if not allowed_hands or preferred_hand in allowed_hands:
chosen_hand = preferred_hand
else:
pass
chosen_hand = allowed_hands.pop()
if chosen_hand is None:
logger.error('Failed to find a valid hand for {}', carry_target)
elif carry_target is not None:
allowed_hands = carry_target.get_allowed_hands(sim)
if not allowed_hands:
logger.error('Sim {} failed to find a | |
from array import array
from ctypes import c_uint8, c_float
from .api import api
from .const import *
from .utils import *
from .library import library
from .device import Device
from .oscilloscopechannels import OscilloscopeChannels
from .exceptions import *
class Oscilloscope(Device):
""""""
def __init__(self, handle):
    """Wrap a device handle and expose its oscilloscope channels.

    :param handle: Device handle for the oscilloscope instrument.
    """
    super(Oscilloscope, self).__init__(handle)
    self._channels = OscilloscopeChannels(handle)
def _get_channels(self):
    # Backing getter for the channel collection created in __init__;
    # presumably exposed as a ``channels`` property elsewhere in the class
    # (other methods use ``self.channels``) — confirm.
    return self._channels
def get_data(self, count=None, raw=False):
""" Get the measurement data for enabled channels.
:param count: Number of samples to read, defaults to all.
:param raw: Get raw data.
:returns: `list` of `array.array`'s with sample data.
"""
if not self.is_data_ready:
raise UnsuccessfulError()
channel_count = len(self.channels)
# Calculate valid data start/length:
if self._measure_mode == MM_BLOCK:
length = int(self._record_length - round(self._pre_sample_ratio * self._record_length) + self.valid_pre_sample_count)
start = self._record_length - length
else:
length = self._record_length
start = 0
if (count is not None) and (count >= 0) and (count < length):
length = count
# Create pointer array:
pointers = api.HlpPointerArrayNew(channel_count)
try:
# Allocate memory and fill pointer array:
result = [None] * channel_count
for i in range(channel_count):
if self._active_channels[i]:
if raw:
raw_type = self.channels[i].data_raw_type
if raw_type == DATARAWTYPE_INT8:
result[i] = array('b', [0]) * length
elif raw_type == DATARAWTYPE_INT16:
result[i] = array('h', [0]) * length
elif raw_type == DATARAWTYPE_INT32:
result[i] = array('l', [0]) * length
elif raw_type == DATARAWTYPE_INT64:
result[i] = array('q', [0]) * length
elif raw_type == DATARAWTYPE_UINT8:
result[i] = array('B', [0]) * length
elif raw_type == DATARAWTYPE_UINT16:
result[i] = array('H', [0]) * length
elif raw_type == DATARAWTYPE_UINT32:
result[i] = array('L', [0]) * length
elif raw_type == DATARAWTYPE_UINT64:
result[i] = array('Q', [0]) * length
elif raw_type == DATARAWTYPE_FLOAT32:
result[i] = array('f', [0]) * length
elif raw_type == DATARAWTYPE_FLOAT64:
result[i] = array('d', [0]) * length
else:
raise UnsuccessfulError()
else:
result[i] = array('f', [0]) * length
api.HlpPointerArraySet(pointers, i, cast(result[i].buffer_info()[0], c_void_p))
# Get the data:
if raw:
api.ScpGetDataRaw(self._handle, pointers, channel_count, start, length)
else:
api.ScpGetData(self._handle, pointers, channel_count, start, length)
library.check_last_status_raise_on_error()
finally:
# Delete pointer array:
api.HlpPointerArrayDelete(pointers)
return result
def _get_valid_pre_sample_count(self):
""" Number of valid pre samples in the measurement. """
value = api.ScpGetValidPreSampleCount(self._handle)
library.check_last_status_raise_on_error()
return value
    def get_data_raw(self, buffers, channel_count, start_index, sample_count):
        """ Get raw measurement data.

        :param buffers: Pointer to buffer with pointers to buffer for channel data, pointer buffer may contain ``None`` pointers.
        :param channel_count: Number of pointers in pointer buffer.
        :param start_index: Position in record to start reading.
        :param sample_count: Number of samples to read.
        :returns: Number of samples read.
        """
        result = api.ScpGetDataRaw(self._handle, buffers, channel_count, start_index, sample_count)
        # Raises the mapped library exception if the call left an error status.
        library.check_last_status_raise_on_error()
        return result
    def get_data_async_completed(self):
        """ Check whether the data download is completed.

        :returns: ``True`` if completed, ``False`` otherwise.
        """
        result = api.ScpIsGetDataAsyncCompleted(self._handle)
        library.check_last_status_raise_on_error()
        # BOOL8 values arrive as integers; normalize to a Python bool.
        return result != BOOL8_FALSE
    def start_get_data_async(self, buffers, channel_count, start_index, sample_count):
        """ Start the download of measurement data for specified channels.

        :param buffers: A pointer to a buffer with pointers to buffers for channel data, the pointer buffer may contain ``None`` pointers.
        :param channel_count: The number of pointers in the pointer buffer.
        :param start_index: The position in the record to start reading.
        :param sample_count: The number of samples to read.

        .. versionadded:: 0.6
        """
        # Asynchronous: poll get_data_async_completed() to detect completion.
        api.ScpStartGetDataAsync(self._handle, buffers, channel_count, start_index, sample_count)
        library.check_last_status_raise_on_error()
    def start_get_data_async_raw(self, buffers, channel_count, start_index, sample_count):
        """ Start the download of raw measurement data for specified channels.

        :param buffers: Pointer to buffer with pointers to buffer for channel data, pointer buffer may contain ``None`` pointers.
        :param channel_count: Number of pointers in pointer buffer.
        :param start_index: Position in record to start reading.
        :param sample_count: Number of samples to read.

        .. versionadded:: 0.6
        """
        # Raw variant of start_get_data_async(); completion is polled the same way.
        api.ScpStartGetDataAsyncRaw(self._handle, buffers, channel_count, start_index, sample_count)
        library.check_last_status_raise_on_error()
    def cancel_get_data_async(self):
        """ Cancel the download of measurement data.

        :returns: ``True`` if successful, ``False`` otherwise.

        .. versionadded:: 0.6
        """
        result = api.ScpCancelGetDataAsync(self._handle)
        library.check_last_status_raise_on_error()
        # BOOL8 values arrive as integers; normalize to a Python bool.
        return result != BOOL8_FALSE
    def set_callback_data_ready(self, callback, data):
        """ Set a callback function which is called when the oscilloscope has new measurement data ready.

        :param callback: A pointer to the callback function. Use ``None`` to disable.
        :param data: Optional user data.
        """
        api.ScpSetCallbackDataReady(self._handle, callback, data)
        # Raises the mapped library exception if the call left an error status.
        library.check_last_status_raise_on_error()
    def set_callback_data_overflow(self, callback, data):
        """ Set a callback function which is called when the oscilloscope streaming measurement caused an data overflow.

        :param callback: A pointer to the callback function. Use ``None`` to disable.
        :param data: Optional user data.
        """
        api.ScpSetCallbackDataOverflow(self._handle, callback, data)
        # Raises the mapped library exception if the call left an error status.
        library.check_last_status_raise_on_error()
    def set_callback_connection_test_completed(self, callback, data):
        """ Set a callback function which is called when the oscilloscope connection test is completed.

        :param callback: A pointer to the callback function. Use ``None`` to disable.
        :param data: Optional user data.
        """
        api.ScpSetCallbackConnectionTestCompleted(self._handle, callback, data)
        # Raises the mapped library exception if the call left an error status.
        library.check_last_status_raise_on_error()
    def set_callback_triggered(self, callback, data):
        """ Set a callback function which is called when the oscilloscope is triggered.

        :param callback: A pointer to the callback function. Use ``None`` to disable.
        :param data: Optional user data.
        """
        api.ScpSetCallbackTriggered(self._handle, callback, data)
        # Raises the mapped library exception if the call left an error status.
        library.check_last_status_raise_on_error()
    # Event notification via file descriptors is Linux-only; the Windows
    # branch below exposes the same hooks through event-object handles.
    if platform.system() == 'Linux':
        def set_event_data_ready(self, event):
            """ Set an event file descriptor which is set when the oscilloscope has new measurement data ready.

            :param event: An event file descriptor. Use ``<0`` to disable.
            """
            api.ScpSetEventDataReady(self._handle, event)
            library.check_last_status_raise_on_error()

        def set_event_data_overflow(self, event):
            """ Set an event file descriptor which is set when the oscilloscope streaming measurement caused an data overflow.

            :param event: An event file descriptor. Use ``<0`` to disable.
            """
            api.ScpSetEventDataOverflow(self._handle, event)
            library.check_last_status_raise_on_error()

        def set_event_connection_test_completed(self, event):
            """ Set an event file descriptor which is set when the oscilloscope connection test is completed.

            :param event: An event file descriptor. Use ``<0`` to disable.
            """
            api.ScpSetEventConnectionTestCompleted(self._handle, event)
            library.check_last_status_raise_on_error()

        def set_event_triggered(self, event):
            """ Set an event file descriptor which is set when the oscilloscope is triggered.

            :param event: An event file descriptor. Use ``<0`` to disable.
            """
            api.ScpSetEventTriggered(self._handle, event)
            library.check_last_status_raise_on_error()
if platform.system() == 'Windows':
def set_event_data_ready(self, event):
""" Set an event object handle which is set when the oscilloscope has new measurement data ready.
:param event: A handle to the event object. Use ``None`` to disable.
"""
api.ScpSetEventDataReady(self._handle, event)
library.check_last_status_raise_on_error()
def set_event_data_overflow(self, event):
""" Set an event object handle which is set when the oscilloscope streaming measurement caused an data overflow.
:param event: A handle to the event object. Use ``None`` to disable.
"""
api.ScpSetEventDataOverflow(self._handle, event)
library.check_last_status_raise_on_error()
def set_event_connection_test_completed(self, event):
""" Set an event object handle which is set when the oscilloscope connection test is completed.
:param event: A handle to the event object. Use ``None`` to disable.
"""
api.ScpSetEventConnectionTestCompleted(self._handle, event)
library.check_last_status_raise_on_error()
def set_event_triggered(self, event):
""" Set an event object handle which is set when the oscilloscope is triggered.
:param event: A handle to the event object. Use ``None`` to disable.
"""
api.ScpSetEventTriggered(self._handle, event)
library.check_last_status_raise_on_error()
def set_message_data_ready(self, wnd, wparam, lparam):
""" Set a window handle to which a #WM_LIBTIEPIE_SCP_DATAREADY message is sent when the oscilloscope has new measurement data ready.
:param wnd: A handle to the window whose window procedure is to receive the message. Use ``None`` to disable.
:param wparam: Optional user value for the ``wParam`` parameter of the message.
:param lparam: Optional user value for the ``lParam`` parameter of the message.
"""
api.ScpSetMessageDataReady(self._handle, wnd, wparam, lparam)
library.check_last_status_raise_on_error()
def set_message_data_overflow(self, wnd, wparam, lparam):
""" Set a window handle to which a #WM_LIBTIEPIE_SCP_DATAOVERFLOW message is sent when the oscilloscope streaming measurement caused an data overflow.
:param wnd: A handle to the window whose window procedure is to receive the message. Use ``None`` to disable.
:param wparam: Optional user value for the ``wParam`` parameter of the message.
:param lparam: Optional user value for the ``lParam`` parameter of the message.
"""
api.ScpSetMessageDataOverflow(self._handle, wnd, wparam, lparam)
library.check_last_status_raise_on_error()
def set_message_connection_test_completed(self, wnd, wparam, lparam):
""" Set a window handle to which a #WM_LIBTIEPIE_SCP_CONNECTIONTESTCOMPLETED message is sent when the oscilloscope connection test is completed.
:param wnd: A handle to the window whose window procedure is to receive the message. Use ``None`` to disable.
:param wparam: Optional user value for the ``wParam`` parameter of the message.
:param lparam: Optional user value for the ``lParam`` parameter of the message.
"""
api.ScpSetMessageConnectionTestCompleted(self._handle, wnd, wparam, lparam)
library.check_last_status_raise_on_error()
def set_message_triggered(self, wnd, wparam, lparam):
""" Set a window handle to which a #WM_LIBTIEPIE_SCP_TRIGGERED message is sent when the oscilloscope is triggered.
:param wnd: A handle to | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Default ProgressBar widgets."""
from __future__ import division
import datetime
import math
import uuid
# Fallback for interpreters where ``abc`` is unavailable: use a plain object
# base and a no-op ``abstractmethod`` so the widget classes still import.
try:
    from abc import ABCMeta, abstractmethod
except ImportError:
    AbstractWidget = object
    abstractmethod = lambda fn: fn
else:
    # abc is available: create an empty ABC to serve as the widget base class.
    AbstractWidget = ABCMeta('AbstractWidget', (object,), {})
def format_updatable(updatable, pbar):
    """Render *updatable*: call its ``update(pbar)`` if it has one, otherwise
    return the value unchanged (e.g. a plain string marker)."""
    if not hasattr(updatable, 'update'):
        return updatable
    return updatable.update(pbar)
def format_updatable_html(updatable, pbar):
    """Render *updatable* as HTML via ``update_html(pbar)`` when available,
    otherwise return the value unchanged."""
    if not hasattr(updatable, 'update_html'):
        return updatable
    return updatable.update_html(pbar)
def updatable_js(updatable, pbar):
    """Render *updatable* as a JS snippet via ``update_js(pbar)`` when
    available; return ``None`` for objects with no JS representation."""
    if not hasattr(updatable, 'update_js'):
        return None
    return updatable.update_js(pbar)
class Widget(AbstractWidget):
    """The base class for all widgets.

    The ProgressBar will call the widget's update value when the widget should
    be updated. The widget's size may change between calls, but the widget may
    display incorrectly if the size changes drastically and repeatedly.

    The boolean TIME_SENSITIVE informs the ProgressBar that it should be
    updated more often because it is time sensitive.
    """

    TIME_SENSITIVE = False
    __slots__ = ()
    # Lazily-assigned element id used by the HTML/JS renderers below.
    # NOTE(review): assigning ``self.uuid`` relies on instances having a
    # ``__dict__`` (the ABCMeta-created base is defined without __slots__);
    # confirm the ImportError fallback path where AbstractWidget is ``object``.
    uuid = None

    @abstractmethod
    def update(self, pbar):
        """Updates the widget.

        pbar - a reference to the calling ProgressBar
        """

    def update_html(self, pbar):
        # Wrap the plain-text rendering in a uniquely-identified <div>.
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
        return '<div id="%s">%s</div>' % (self.uuid, self.update(pbar))

    def update_js(self, pbar):
        # jQuery snippet that refreshes the text of this widget's <div>.
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
        return "$('div#%s').text('%s');" % (self.uuid, self.update(pbar))
class WidgetHFill(Widget):
    """The base class for all variable width widgets.

    This widget is much like the \\hfill command in TeX, it will expand to
    fill the line. You can use more than one in the same line, and they will
    all have the same width, and together will fill the line.
    """

    # Fallback width used when the ProgressBar does not supply one.
    DEFAULT_WIDTH = 50

    @abstractmethod
    def update(self, pbar, width=DEFAULT_WIDTH):
        """Updates the widget providing the total width the widget must fill.

        pbar - a reference to the calling ProgressBar
        width - The total width the widget must fill
        """
class Timer(Widget):
    """Widget that renders the elapsed time of the progress bar."""

    TIME_SENSITIVE = True
    __slots__ = ('format_string',)

    def __init__(self, format='Elapsed Time: %s'):
        self.format_string = format

    @staticmethod
    def format_time(seconds):
        """Return *seconds* formatted as the string "HH:MM:SS"."""
        whole_seconds = int(seconds)
        return str(datetime.timedelta(seconds=whole_seconds))

    def update(self, pbar):
        """Render the elapsed time using the stored format string."""
        return self.format_string % self.format_time(pbar.seconds_elapsed)
class ETA(Timer):
    """Widget which attempts to estimate the time of arrival."""

    TIME_SENSITIVE = True

    def update(self, pbar):
        """Show the ETA while running, or the total time once finished."""
        # No progress yet: the ETA is undefined.
        if pbar.currval == 0:
            return 'ETA: --:--:--'
        # Done: report the total elapsed time instead of an estimate.
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        # Linear extrapolation from the progress made so far.
        elapsed = pbar.seconds_elapsed
        remaining = elapsed * pbar.maxval / pbar.currval - elapsed
        return 'ETA: %s' % self.format_time(remaining)
class FileTransferSpeed(Widget):
    """Widget for showing the transfer speed (useful for file transfers)."""

    FORMAT = '%6.2f %s%s/s'
    PREFIXES = ' kMGTPEZY'
    __slots__ = ('unit',)

    def __init__(self, unit='B'):
        self.unit = unit

    def update(self, pbar):
        """Render the current speed with an SI prefix (B/s, kB/s, MB/s, ...)."""
        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:  # effectively zero
            power = 0
            scaled = 0
        else:
            rate = pbar.currval / pbar.seconds_elapsed
            # Pick the SI prefix so the mantissa lands in [1, 1000).
            power = int(math.log(rate, 1000))
            scaled = rate / 1000. ** power
        return self.FORMAT % (scaled, self.PREFIXES[power], self.unit)
class AnimatedMarker(Widget):
    """An animated marker for the progress bar which defaults to appear as if
    it were rotating.
    """

    __slots__ = ('markers', 'curmark')

    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1

    def update(self, pbar):
        """Advance to the next marker, or show the first marker once the bar
        has finished."""
        if pbar.finished:
            return self.markers[0]
        next_mark = (self.curmark + 1) % len(self.markers)
        self.curmark = next_mark
        return self.markers[next_mark]


# Alias for backwards compatibility
RotatingMarker = AnimatedMarker
class Counter(Widget):
    """Displays the current count."""

    __slots__ = ('format_string',)

    def __init__(self, format='%d'):
        self.format_string = format

    def update(self, pbar):
        """Render the bar's current value using the stored format."""
        return self.format_string % pbar.currval
class Attribute(Widget):
    """Displays the values of ProgressBar attributes.

    attr_name - ProgressBar attribute dictionary key or list of keys
    format_string - Format for the output. Attributes are looked up according
        to attr_name and then used as a tuple with this format string, i.e.
        format_string % attr_tuple
    fallback - If an attribute lookup fails, this string is displayed instead.
    """

    __slots__ = ('attr_name', 'format_string', 'fallback')

    def __init__(self, attr_name, format='%s', fallback='?'):
        self.attr_name = attr_name
        self.format_string = format
        self.fallback = fallback

    def update(self, pbar):
        try:
            if isinstance(self.attr_name, str) or len(self.attr_name) == 1:
                # If attr_name is just a string or a single item,
                # use it as the key as is
                # NOTE(review): a one-element *list* would be used as the dict
                # key here and raise TypeError (unhashable), which the KeyError
                # handler below does not catch -- confirm callers pass a str
                # or tuple.
                format_vars = (pbar.attr[self.attr_name],)
            else:
                # else, expand it as a tuple of attributes
                format_vars = tuple([pbar.attr[a] for a in self.attr_name])
            return self.format_string % format_vars
        except KeyError:
            # Missing attribute: show the fallback instead of crashing.
            return self.fallback
class Percentage(Widget):
    """Displays the current percentage as a number with a percent sign."""

    def update(self, pbar):
        """Render the bar's percentage right-aligned in three digits, e.g. ' 42%'."""
        return '%3d%%' % pbar.percentage()
class FormatLabel(Timer):
    """Displays a formatted label built from ProgressBar attributes.

    The format string may reference the mapped names below with
    ``%(name)s``-style placeholders, e.g. ``FormatLabel('value: %(value)d')``.
    """

    # Maps placeholder names to (ProgressBar attribute, optional transform).
    mapping = {
        'elapsed': ('seconds_elapsed', Timer.format_time),
        'finished': ('finished', None),
        'last_update': ('last_update_time', None),
        'max': ('maxval', None),
        'seconds': ('seconds_elapsed', None),
        'start': ('start_time', None),
        'value': ('currval', None)
    }

    __slots__ = ('format_string',)

    def __init__(self, format):
        self.format_string = format

    def update(self, pbar):
        """Render the label, silently skipping attributes that are missing or
        whose transform fails."""
        context = {}
        for name, (key, transform) in self.mapping.items():
            try:
                value = getattr(pbar, key)
                if transform is None:
                    context[name] = value
                else:
                    context[name] = transform(value)
            # Was a bare ``except:`` -- narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            except Exception:
                pass
        return self.format_string % context
class SimpleProgress(Widget):
    """Returns progress as a count of the total (e.g.: "5 of 47")."""

    __slots__ = ('sep',)

    def __init__(self, sep=' of '):
        self.sep = sep

    def update(self, pbar):
        """Render "<current><sep><max>"."""
        return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
class Bar(WidgetHFill):
    """A progress bar which stretches to fill the line."""

    __slots__ = ('marker', 'left', 'right', 'fill', 'fill_left')

    def __init__(self, marker='#', left='|', right='|', fill=' ',
                 fill_left=True):
        """Creates a customizable progress bar.

        marker - string or updatable object to use as a marker
        left - string or updatable object to use as a left border
        right - string or updatable object to use as a right border
        fill - character to use for the empty part of the progress bar
        fill_left - whether to fill from the left or the right
        """
        self.marker = marker
        self.left = left
        self.right = right
        self.fill = fill
        self.fill_left = fill_left

    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
        """Updates the progress bar and its subcomponents."""
        left, marked, right = (format_updatable(i, pbar) for i in
                               (self.left, self.marker, self.right))
        # Only the space between the two borders is available for the marker.
        width -= len(left) + len(right)
        # Marked must *always* have length of 1
        if pbar.maxval:
            marked *= int(pbar.currval / pbar.maxval * width)
        else:
            marked = ''
        # Pad the marker run with the fill character on the chosen side.
        if self.fill_left:
            return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
        else:
            return '%s%s%s' % (left, marked.rjust(width, self.fill), right)

    def update_html(self, pbar):
        # Emit a jQuery-UI progressbar container; later value changes are
        # applied through update_js().
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
        return """
        <div class="pb_bar" id="%s"></div>
        <script type="text/javascript">
            $("div#%s").progressbar({value: 0, max: %d});
        </script>
        """ % (self.uuid, self.uuid,pbar.maxval)

    def update_js(self, pbar):
        # Create the jQuery-UI progressbar on first call, then update its value.
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
        return """
        var $myPB = $("div#{divid}")
        if ($myPB.hasClass('ui-progressbar')) {{
            $myPB.progressbar('value', {pbar.currval:d});
        }} else {{
            $myPB.progressbar({{value: 0, max: {pbar.maxval:d}}});
        }}
        """.format(divid=self.uuid, pbar=pbar)
class ReverseBar(Bar):
    """A bar which has a marker which bounces from side to side."""

    def __init__(self, marker='#', left='|', right='|', fill=' ',
                 fill_left=False):
        """Create a customizable progress bar that fills from the right.

        Same parameters as `Bar`, but ``fill_left`` defaults to ``False``.
        """
        # Bar.__init__ assigns exactly these five attributes; delegate to it
        # instead of duplicating the assignments.
        Bar.__init__(self, marker=marker, left=left, right=right, fill=fill,
                     fill_left=fill_left)
class BouncingBar(Bar):
def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
"""Updates the progress bar and its subcomponents."""
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= | |
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1, 2, 3}, set()],
"trust_with_sets": [{1, 2, 3}, set()]
}
],
{
"node_order": [Create, Deciles, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1, 2, 3}, set()],
"trust_with_sets": [{1, 2, 3}, set()]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
set(), set(), set(), set(),
set(), set(), set(), set(), set()
],
"trust_with_sets": [
set(), set(), set(), set(),
set(), set(), set(), set(), set()
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1}, {2}],
"trust_with_sets": [{1}, {2}]
}
],
{
"node_order": [Create, Deciles, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1}, {2}],
"trust_with_sets": [{1}, {2}]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
{2}, {2}, {2}, {2}, {2},
{2}, {2}, {2}, {2}
],
"trust_with_sets": [
{2}, {2}, {2}, {2}, {2},
{2}, {2}, {2}, {2}
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b", "c"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1, 2}, {3}, {1}],
"trust_with_sets": [{1, 2}, {3}, {1}]
}
],
{
"node_order": [Create, Deciles, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1, 2}, {3}, {1}],
"trust_with_sets": [{1, 2}, {3}, {1}]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
{3}, {3}, {3}, {3}, {3},
{3}, {3}, {3}, {3}
],
"trust_with_sets": [
{3}, {3}, {3}, {3}, {3},
{3}, {3}, {3}, {3}
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
)
])
def test_deciles_no_group_col(party_data, expected):
    # Build a single-input workflow Create -> Deciles -> Collect and verify the
    # resulting DAG's node order, MPC requirements, and per-column ownership.
    input_cols = create_cols(party_data[0])
    c = create("in1", input_cols, party_data[0]["stored_with"])
    # Empty list = no group-by columns; second column name is the key column.
    di = deciles(c, "dec", [], party_data[0]["col_names"][1])
    # Results are revealed to all three parties.
    collect(di, {1, 2, 3})
    d = Dag({c})
    compare_to_expected(d, expected)
@pytest.mark.parametrize("party_data, expected", [
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
}
],
{
"node_order": [Create, AllStats, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
],
"trust_with_sets": [
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1, 2, 3}, set()],
"trust_with_sets": [{1, 2, 3}, set()]
}
],
{
"node_order": [Create, AllStats, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1, 2, 3}, set()],
"trust_with_sets": [{1, 2, 3}, set()]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
{1, 2, 3}, set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
],
"trust_with_sets": [
{1, 2, 3}, set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1}, {2}],
"trust_with_sets": [{1}, {2}]
}
],
{
"node_order": [Create, AllStats, Collect],
"requires_mpc": [True, True, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1}, {2}],
"trust_with_sets": [{1}, {2}]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
{1}, set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
],
"trust_with_sets": [
{1}, set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set(), set(), set(),
set(), set(), set()
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
],
"trust_with_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}
]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2, 3},
"plaintext_sets": [{1, 2}, {1, 3}],
"trust_with_sets": [{1, 2}, {1, 3}]
}
],
{
"node_order": [Create, AllStats, Collect],
"requires_mpc": [True, False, False],
"ownership_data": [
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [{1, 2}, {1, 3}],
"trust_with_sets": [{1, 2}, {1, 3}]
},
{
"stored_with": [{1, 2, 3}],
"plaintext_sets": [
{1, 2}, {1}, {1}, {1}, {1},
{1}, {1}, {1}, {1}, {1},
{1}, {1}, {1}, {1}, {1},
{1}, {1}, {1}
],
"trust_with_sets": [
{1, 2}, {1}, {1}, {1}, {1},
{1}, {1}, {1}, {1}, {1},
{1}, {1}, {1}, {1}, {1},
{1}, {1}, {1}
]
},
{
"stored_with": [{1}, {2}, {3}],
"plaintext_sets": [
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3},
{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, | |
r"""
Factory
This file contains the constructor classes and functions for `p`-adic rings and fields.
AUTHORS:
- <NAME>
TESTS::
sage: R = ZpLC(2)
doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation.
See http://trac.sagemath.org/23505 for details.
sage: R = ZpLF(2)
sage: R = QpLC(2)
sage: R = QpLF(2)
"""
#*****************************************************************************
# Copyright (C) 2007-2013 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function
from sage.misc.superseded import experimental
from sage.structure.factory import UniqueFactory
from sage.rings.integer import Integer
from sage.rings.infinity import Infinity
from sage.structure.factorization import Factorization
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.polynomial.polynomial_element import is_Polynomial
from sage.structure.element import is_Element
from .padic_base_leaves import (pAdicRingCappedRelative,
pAdicRingCappedAbsolute,
pAdicRingFixedMod,
pAdicRingFloatingPoint,
pAdicRingLattice,
pAdicFieldCappedRelative,
pAdicFieldFloatingPoint,
pAdicFieldLattice)
from . import padic_printing
######################################################
# ext_table --
# This dictionary controls what class is created by the extension
# factory when it finds a given class in the ground ring of the tower.
######################################################
from .padic_extension_leaves import *
from .relative_extension_leaves import *
from functools import reduce
#This imports all of the classes used in the ext_table below.
# Keys are (extension-type code, ground ring class); values are the class the
# extension factory instantiates.  Codes: 'e' = Eisenstein, 'u' = unramified,
# 're' = relative Eisenstein ('p' = general extensions, currently disabled).
ext_table = {}
ext_table['e', pAdicFieldCappedRelative] = EisensteinExtensionFieldCappedRelative
ext_table['e', pAdicRingCappedAbsolute] = EisensteinExtensionRingCappedAbsolute
ext_table['e', pAdicRingCappedRelative] = EisensteinExtensionRingCappedRelative
ext_table['e', pAdicRingFixedMod] = EisensteinExtensionRingFixedMod
#ext_table['e', pAdicRingFloatingPoint] = EisensteinExtensionRingFloatingPoint
#ext_table['e', pAdicFieldFloatingPoint] = EisensteinExtensionFieldFloatingPoint
#ext_table['p', pAdicFieldCappedRelative] = pAdicGeneralExtensionFieldCappedRelative
#ext_table['p', pAdicRingCappedAbsolute] = pAdicGeneralExtensionRingCappedAbsolute
#ext_table['p', pAdicRingCappedRelative] = pAdicGeneralExtensionRingCappedRelative
#ext_table['p', pAdicRingFixedMod] = pAdicGeneralExtensionRingFixedMod
ext_table['u', pAdicFieldCappedRelative] = UnramifiedExtensionFieldCappedRelative
ext_table['u', pAdicRingCappedAbsolute] = UnramifiedExtensionRingCappedAbsolute
ext_table['u', pAdicRingCappedRelative] = UnramifiedExtensionRingCappedRelative
ext_table['u', pAdicRingFixedMod] = UnramifiedExtensionRingFixedMod
ext_table['u', pAdicRingFloatingPoint] = UnramifiedExtensionRingFloatingPoint
ext_table['u', pAdicFieldFloatingPoint] = UnramifiedExtensionFieldFloatingPoint
ext_table['re', pAdicRingFixedMod] = RelativeRamifiedExtensionRingFixedMod
ext_table['re', pAdicRingCappedAbsolute] = RelativeRamifiedExtensionRingCappedAbsolute
ext_table['re', pAdicRingCappedRelative] = RelativeRamifiedExtensionRingCappedRelative
ext_table['re', pAdicFieldCappedRelative] = RelativeRamifiedExtensionFieldCappedRelative
ext_table['re', pAdicRingFloatingPoint] = RelativeRamifiedExtensionRingFloatingPoint
ext_table['re', pAdicFieldFloatingPoint] = RelativeRamifiedExtensionFieldFloatingPoint
def _canonicalize_show_prec(type, print_mode, show_prec=None):
r"""
Return a canonical string value for show_prec depending of the type,
the print_mode and the given value.
INPUT:
- ``type`` -- a string: ``'capped-rel'``, ``'capped-abs'``, ``'fixed-mod'`` or ``'floating-point'``,
``'lattice-cap'`` or ``'lattice-float'``
- ``print_mode`` -- a string: ``'series'``, ``'terse'``, ``'val-unit'``, ``'digits'``, ``'bars'``
- ``show_prec`` -- a boolean, string or ``None``
OUTPUT:
A string, either ``'bigoh'``, ``'dots'`` or ``'none'``
EXAMPLES::
sage: from sage.rings.padics.factory import _canonicalize_show_prec
sage: _canonicalize_show_prec('floating-point', 'series')
'none'
sage: _canonicalize_show_prec('capped-rel', 'series')
'bigoh'
sage: _canonicalize_show_prec('capped-rel', 'series', False)
'none'
sage: _canonicalize_show_prec('capped-abs', 'digits')
'dots'
sage: _canonicalize_show_prec('capped-abs', 'digits', 'bigoh')
'bigoh'
TESTS::
sage: _canonicalize_show_prec('capped-abs', 'digits', 'my_precision')
Traceback (most recent call last):
...
ValueError: show_prec must be either a boolean, 'none', 'bigoh' or 'dots' when printing mode is digits
"""
# Note that None means "choose the default for this ring", while 'none' means "don't print precision".
if show_prec is None:
show_prec = type not in ('floating-point', 'fixed-mod')
if show_prec is False:
return "none"
if show_prec is True:
if print_mode in ('series', 'terse', 'val-unit'):
return "bigoh"
else:
return "dots"
if print_mode in ('series', 'terse', 'val-unit'):
if show_prec not in ('none', 'bigoh'):
raise ValueError("show_prec must be either a boolean, 'none' or 'bigoh' when printing mode is %s" % print_mode)
else:
if show_prec not in ('none', 'bigoh', 'dots'):
raise ValueError("show_prec must be either a boolean, 'none', 'bigoh' or 'dots' when printing mode is %s" % print_mode)
return show_prec
def get_key_base(p, prec, type, print_mode, names, ram_name, print_pos, print_sep, print_alphabet, print_max_terms, show_prec, check, valid_types, label=None):
    r"""
    This implements create_key for Zp and Qp: moving it here prevents code duplication.

    It fills in unspecified values and checks for contradictions in the input.
    It also standardizes irrelevant options so that duplicate parents are not
    created.

    INPUT:

    - ``p`` -- the prime (validated and coerced to an ``Integer`` when ``check`` is True)
    - ``prec`` -- the precision cap; for type ``'lattice-cap'`` either a pair
      ``(relative_cap, absolute_cap)`` or a single relative cap
    - ``type`` -- the precision-handling type; must belong to ``valid_types``
    - ``print_mode`` -- a print mode string, a dictionary of printing options, or ``None``
    - ``names``, ``ram_name`` -- two (redundant) ways of specifying the printed name of ``p``
    - ``print_pos``, ``print_sep``, ``print_alphabet``, ``print_max_terms``,
      ``show_prec`` -- printing options; ``None`` means "use the default"
    - ``check`` -- whether to validate ``p`` and ``prec``
    - ``valid_types`` -- the types allowed for the ring being constructed
    - ``label`` -- an optional label (used by the lattice precision types)

    OUTPUT:

    The (hashable) key used by the UniqueFactory to cache the parent.

    EXAMPLES::

        sage: from sage.rings.padics.factory import get_key_base
        sage: get_key_base(11, 5, 'capped-rel', None, None, None, None, ':', None, None, False, True, ['capped-rel'])
        (11, 5, 'capped-rel', 'series', '11', True, '|', (), -1, 'none', None)
        sage: get_key_base(12, 5, 'capped-rel', 'digits', None, None, None, None, None, None, True, False, ['capped-rel'])
        (12,
         5,
         'capped-rel',
         'digits',
         '12',
         True,
         '|',
         ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'),
         -1,
         'dots',
         None)
    """
    if check:
        if not isinstance(p, Integer):
            p = Integer(p)
        if not p.is_prime():
            raise ValueError("p must be prime")
        if type == 'lattice-cap':
            # The precision of a lattice-cap ring is a pair of caps; accept
            # either the pair itself or a single relative cap.
            relative_cap = absolute_cap = None
            if prec is not None:
                try:
                    relative_cap, absolute_cap = prec
                except (ValueError, TypeError):
                    relative_cap = prec
            if relative_cap is not None:
                if relative_cap is not Infinity:
                    try:
                        relative_cap = Integer(relative_cap)
                    except TypeError:
                        raise TypeError("relative cap must be either a positive integer or infinity")
                    if relative_cap <= 0:
                        raise ValueError("relative cap must be positive")
            if absolute_cap is not None:
                try:
                    absolute_cap = Integer(absolute_cap)
                except TypeError:
                    raise TypeError("absolute cap must be an integer")
            # Fill in whichever cap(s) were left unspecified.
            if relative_cap is None and absolute_cap is None:
                relative_cap = DEFAULT_PREC
                absolute_cap = 2 * DEFAULT_PREC
            elif relative_cap is None:
                relative_cap = Infinity
            elif absolute_cap is None:
                absolute_cap = 2 * relative_cap
            prec = (relative_cap, absolute_cap)
        else:
            if prec is not None:
                prec = Integer(prec)
    if prec is None:
        if type == 'lattice-cap':
            prec = (DEFAULT_PREC, 2*DEFAULT_PREC)
        else:
            prec = DEFAULT_PREC
    print_ram_name = ram_name
    if isinstance(print_mode, dict):
        # A dictionary bundles all printing options; unpack the recognized
        # keys (they override the corresponding keyword arguments).
        if 'pos' in print_mode:
            print_pos = print_mode['pos']
        if 'ram_name' in print_mode:
            print_ram_name = print_mode['ram_name']
        # NOTE: an 'unram_name' key is accepted for interface compatibility
        # with the Qq/Zq factories but is irrelevant for Qp/Zp, so it is
        # deliberately ignored here (it was previously stored in an unused
        # local variable).
        if 'sep' in print_mode:
            print_sep = print_mode['sep']
        if 'alphabet' in print_mode:
            print_alphabet = print_mode['alphabet']
        if 'max_ram_terms' in print_mode:
            print_max_terms = print_mode['max_ram_terms']
        if 'max_terms' in print_mode:
            print_max_terms = print_mode['max_terms']
        if 'show_prec' in print_mode:
            show_prec = print_mode['show_prec']
        if 'mode' in print_mode:
            print_mode = print_mode['mode']
        else:
            print_mode = None
    # Fall back to the global printer defaults for unspecified options.
    if print_mode is None:
        print_mode = padic_printing._printer_defaults.mode()
    if print_pos is None:
        print_pos = not padic_printing._printer_defaults.allow_negatives()
    if print_sep is None:
        print_sep = padic_printing._printer_defaults.sep()
    if print_alphabet is None:
        print_alphabet = padic_printing._printer_defaults.alphabet()
    if print_max_terms is None:
        print_max_terms = padic_printing._printer_defaults.max_series_terms()
    # We eliminate irrelevant print options (e.g. print_pos if p = 2)
    if p == 2 or print_mode == 'digits':
        print_pos = True # we want this hard-coded so that we don't get duplicate parents if the keys differ.
    if print_mode == 'digits':
        print_ram_name = None
        print_alphabet = print_alphabet[:p]
    else:
        print_alphabet = []
    if print_mode != 'bars':
        print_sep = '|'
    if print_mode in ['terse', 'val-unit']:
        print_max_terms = -1
    # Reconcile the two redundant ways of specifying the printed name of p.
    if isinstance(names, tuple):
        names = names[0]
    if names is None and print_ram_name is None:
        name = str(p)
    elif names is not None and print_ram_name is not None:
        if not isinstance(names, str):
            names = str(names)
        if not isinstance(print_ram_name, str):
            print_ram_name = str(print_ram_name)
        if names != print_ram_name:
            raise ValueError("If both names (%s) and print_ram_name (%s) are specified, they must agree"%(names, print_ram_name))
        name = names
    else:
        if names is None:
            names = print_ram_name
        if isinstance(names, str):
            name = names
        else:
            name = str(names)
    if type not in valid_types:
        raise ValueError("type must be %s"%(", ".join(valid_types)))
    show_prec = _canonicalize_show_prec(type, print_mode, show_prec)
    key = (p, prec, type, print_mode, name, print_pos, print_sep, tuple(print_alphabet), print_max_terms, show_prec, label)
    return key
#######################################################################################################
#
# p-Adic Fields
# Qp -- base field
# Qq -- unramified extension field of Qp
# QpCR, QpLC, QpLF, QqCR -- shortcuts for capped relative and lattice versions of Qp and Qq
#
#######################################################################################################
# Cache of previously constructed p-adic fields, keyed by construction key.
padic_field_cache = {}
# Default precision cap used when the caller does not specify one.
DEFAULT_PREC = Integer(20)
class Qp_class(UniqueFactory):
r"""
A creation function for `p`-adic fields.
INPUT:
- ``p`` -- integer: the `p` in `\mathbb{Q}_p`
- ``prec`` -- integer (default: ``20``) the precision cap of the field.
In the lattice capped case, ``prec`` can either be a
pair (``relative_cap``, ``absolute_cap``) or an integer
(understood as the relative cap).
Except in the floating point case, individual elements keep track of
their own precision. See TYPES and PRECISION below.
- ``type`` -- string (default: ``'capped-rel'``) Valid types are
``'capped-rel'``, ``'floating-point'``, ``'lattice-cap'``, ``'lattice-float'``.
See TYPES and PRECISION below
- ``print_mode`` -- string (default: ``None``). Valid modes are 'series',
'val-unit', 'terse', 'digits', and 'bars'. See PRINTING below
- ``names`` -- string or tuple (defaults to a string representation of
`p`). What to use whenever `p` is printed.
- ``ram_name`` -- string. Another way to specify the name; for
consistency with the ``Qq`` and ``Zq`` and extension functions.
- ``print_pos`` -- bool (default ``None``) Whether to only use positive
integers in the representations of elements. See PRINTING below.
- ``print_sep`` -- string (default ``None``) The separator character used
in the ``'bars'`` mode. See PRINTING below.
- ``print_alphabet`` -- tuple (default ``None``) The encoding into digits
for use in the 'digits' mode. See PRINTING below.
- ``print_max_terms`` -- integer (default ``None``) The maximum | |
# ------------------------------------------------------------
# PATCH NOTES
# ------------------------------------------------------------
# The changes in this version are as follows:
# * New grid
# * Don't use trap
# ! If digging on potential trap, nobody must be around this trap
# ! When moving, avoid having 2 robots nearby the same trap
# ! (Try to add the "and an enemy is nearby")
# ! Avoid "spawn mine wall" by having only one person at the base after turn X ?
# ------------------------------------------------------------
# IMPORTS
# ------------------------------------------------------------
import math
import sys
# ------------------------------------------------------------
# SETTINGS
# ------------------------------------------------------------
# Pre-planned radar positions for each setup, in burial order.
RADAR_SETUPS = {
    1: [
        (5, 0), # 0 will be replaced by robot.y
        (10, 3),
        (10, 11),
        (15, 7),
        (20, 3),
        (20, 11),
        (25, 7),
        (28, 3),
        (28, 11),
        (3, 2),
        (3, 12),
        (14, 1),
        (14, 13),
    ],
}
# First-turn rally column for each setup.
X_SETUPS = {
    1: 4,
}
SETUP = 1  # which setup (radar layout + rally column) to play
HARDCODED_RADARS = RADAR_SETUPS[SETUP]
FIRST_X = X_SETUPS[SETUP]
GET_TRAP_MIN_TURN = 0  # earliest turn at which a trap may be requested
GET_TRAP_MAX_TURN = 0  # latest turn at which a trap may be requested
DIG_ENEMY_TRAP_MIN_TURN = 100  # turn after which digging suspected enemy traps becomes acceptable
LOW_ORE = 10  # below this many known safe ore cells, prioritize requesting a radar
COMMENTS = True  # append debug comments to the orders printed for the referee
# ------------------------------------------------------------
# INITIAL DATA
# ------------------------------------------------------------
# Game
# Grid size, read once from the first line of the referee input.
WIDTH, HEIGHT = [int(i) for i in input().split()]
# Robots
ALL_ROBOTS = {}  # entity id -> Robot, both teams
MY_ROBOTS = []
ENEMY_ROBOTS = []
# Items
# Remaining cooldown (in turns) before each item can be requested again.
COOLDOWNS = {
    "RADAR": 0,
    "TRAP": 0
}
# Mapping of the referee's item codes to readable names.
ITEM_NAMES = {
    -1: None,
    2: "RADAR",
    3: "TRAP",
    4: "ORE",
}
# Game
MY_SCORE = 0
ENEMY_SCORE = 0
TURN = 0
# Ore
CELLS_SCANNED = set()  # cells whose ore amount is known (radar coverage)
CELLS_WITH_ORE = set()
# Holes
CELLS_WITHOUT_HOLE = set()
MY_HOLES = set()
ENEMY_HOLES = set()
NEW_HOLES = set()  # holes that appeared this turn
# Traps
MY_TRAPS = set()
INCOMING_TRAPS = set()  # presumably traps requested but not yet buried -- confirm where it is updated
ENEMY_TRAPS = set()  # suspected enemy trap locations
# CLASS
# ------------------------------------------------------------
class Robot:
    """A robot on the grid (ours or the enemy's).

    The decision methods below are driven via play() for our own robots only.
    NOTE(review): ``self.x``, ``self.y`` and ``self.immobile`` are read in
    several methods but not defined in this part of the class -- presumably
    properties derived from ``self.position``; confirm in the rest of the file.
    """
    # --------------------
    # Constants
    # --------------------
    # Priority-ordered list of (method name, positional args): play() walks
    # this list and performs the first action whose method returns truthy.
    ACTIONS = [
        ("play_dead", []),
        ("first_turn_action", []),
        ("trigger_trap", []),
        ("go_to_destination", []),
        ("bring_back_ore", []),
        ("pick_up_item", []),
        ("move_to_hardcoded_radar", []),
        ("burry_hardcoded_radar", []),
        ("dig_adjacent_ore", [True, 0]),
        ("move_to_ore", [True, 0]),
        ("go_get_radar", []),
        ("dig_adjacent_ore", [False, DIG_ENEMY_TRAP_MIN_TURN]),
        ("move_to_ore", [False, DIG_ENEMY_TRAP_MIN_TURN]),
        ("dig_unknown_cell", []),
        ("move_to_unknown_cell", []),
        ("wait_it_out", []),
    ]
    # Class-wide flag set while one of our robots is on its way back to base
    # to fetch a radar (avoids sending several robots at once).
    GETTING_RADAR = False
    # --------------------
    # Core Methods
    # --------------------
    def __init__(self, id, type, cell):
        """Initializes our robot and its attributes"""
        # General
        self.dead = False
        self.id = id
        self.type = type
        self.last_action = None
        # Our robots have ids 0-4 and the enemy's 5-9: normalize to 0-4.
        self.index = id if id < 5 else id - 5
        # Dig setup
        self.last_dig_cell = None
        self.dig_objective = None
        # Movement setup
        self.position = cell
        self.previous_position = cell
        self.destination = None
        # Item setup
        self.item = None
        self.get_first_radar = False
        self.getting_radar = False
        self.hardcoded_radar_cell = None
    # --------------------
    # Game Actions
    # --------------------
    def dig(self, cell, comment=""):
        """Gives the "DIG" order. The robot will dig on the given cell (x, y)"""
        # NOTE(review): real_time_dig_update is defined outside this excerpt;
        # presumably it updates the shared map state immediately -- confirm.
        self.real_time_dig_update(cell)
        self.last_dig_cell = cell
        self.dig_objective = None
        self.last_action = "DIG"
        if not COMMENTS:
            comment = ""
        print("DIG", *cell, comment)
    def move(self, cell, closest=True, comment=""):
        """Gives the "MOVE" order. The robot will move towards the given cell (x, y)"""
        if closest:
            # Aim for the closest free cell adjacent to the target instead of
            # the target cell itself.
            self.dig_objective = cell
            cell = self.get_closest_unoccupied_cell(self.position, cell)
        self.destination = cell
        self.last_action = "MOVE"
        comment += " " + str(self.dig_objective)
        if not COMMENTS:
            comment = ""
        print("MOVE", *cell, comment)
    def request(self, item, comment=""):
        """Gives the "REQUEST" order. The robots asks for a RADAR or a TRAP"""
        # Requesting immediately puts the item on a 5-turn cooldown.
        COOLDOWNS[item] = 5
        self.item = item
        self.last_action = "REQUEST"
        if not COMMENTS:
            comment = ""
        print("REQUEST", item, comment)
    def wait(self, comment=""):
        """Gives the "WAIT" order. The robot does nothing"""
        self.last_action = "WAIT"
        if not COMMENTS:
            comment = ""
        print("WAIT", comment)
    # --------------------
    # ACTION CHOICES
    # --------------------
    def play(self):
        """Performs the first applicable action of the prioritized ACTIONS list."""
        for function_name, args in self.ACTIONS:
            function = getattr(self, function_name)
            if len(args) > 0:
                done = function(*args)
            else:
                done = function()
            if done:
                break
    def play_dead(self):
        """Simply WAITs when the robot is dead."""
        if self.dead:
            self.wait("play dead")
            return True
    def first_turn_action(self):
        """On turn 1, either requests the first radar or rushes toward FIRST_X."""
        if TURN == 1:
            if self.get_first_radar:
                self.request("RADAR", "first turn action")
            else:
                cell = (FIRST_X, self.y)
                self.move(cell, False, "first turn action")
            return True
    def trigger_trap(self):
        """Detonates an adjacent friendly trap when it would hit more enemies than allies."""
        adjacent_cells = get_adjacent_cells(self.position)
        for cell in adjacent_cells:
            if cell in MY_TRAPS:
                friendly_robots = len(adjacent_robots(cell, 0))
                enemy_robots = len(adjacent_robots(cell, 1))
                if friendly_robots < enemy_robots:
                    self.dig(cell, "trigger trap")
                    return True
    def go_to_destination(self):
        """Keeps moving toward a previously chosen destination."""
        if self.destination is not None:
            self.move(self.destination, False, "go to destination")
            return True
    def bring_back_ore(self):
        """Carries held ore back to the base column (x = 0)."""
        if self.item == "ORE":
            cell = (0, self.y)
            self.move(cell, False, "bring back ore")
            return True
    def pick_up_item(self):
        """While empty-handed at base, requests a radar (if safe ore is scarce) or a trap."""
        if self.item is None and self.x == 0:
            if not COOLDOWNS["RADAR"] and calculate_safe_ore() < LOW_ORE:
                # This robot takes over the radar duty.
                Robot.GETTING_RADAR = False
                self.getting_radar = False
                self.request("RADAR", "pick up item")
                return True
            elif not COOLDOWNS["TRAP"]:
                if TURN >= GET_TRAP_MIN_TURN and TURN <= GET_TRAP_MAX_TURN and not most_alive_robots():
                    self.request("TRAP", "pick up item")
                    return True
    def move_to_hardcoded_radar(self):
        """Carries a freshly requested radar toward the next hardcoded radar spot."""
        if self.item == "RADAR" and self.destination is None and self.x == 0:
            if len(HARDCODED_RADARS) > 0:
                cell = self.choose_which_hardcoded_radar()
                if self.get_first_radar:
                    self.get_first_radar = False
                    # Keep our own row for the very first radar to save a detour.
                    cell = (cell[0], self.y)
                self.hardcoded_radar_cell = cell
                self.move(cell, True, "move to hardcoded radar")
                return True
    def burry_hardcoded_radar(self):
        """Buries the carried radar, dodging to an adjacent cell if the spot is trapped."""
        if self.hardcoded_radar_cell is not None and self.destination is None:
            radar_cell = self.hardcoded_radar_cell
            self.hardcoded_radar_cell = None
            if radar_cell in MY_TRAPS.union(ENEMY_TRAPS):
                # Planned spot is (or may be) trapped: bury on the first safe
                # neighbor instead, or give up if all neighbors are trapped.
                cells = get_adjacent_cells(self.position)
                for cell in cells:
                    if cell not in MY_TRAPS.union(ENEMY_TRAPS):
                        radar_cell = cell
                        break
                else:
                    radar_cell = None
            if radar_cell is not None:
                self.dig(radar_cell, "burry hardcoded radar")
                return True
    def dig_adjacent_ore(self, avoid_enemy_traps=True, min_turn=0):
        """Digs a neighboring ore cell, optionally risking suspected enemy traps."""
        if TURN > min_turn:
            alive_robots = [robot for robot in MY_ROBOTS if not robot.dead]
            # Only risk potential enemy traps while enough robots survive.
            if avoid_enemy_traps or len(alive_robots) > 2:
                traps = get_traps(avoid_enemy_traps)
                adjacent_cells = get_adjacent_cells(self.position)
                for cell in adjacent_cells:
                    if cell not in traps and cell in CELLS_WITH_ORE:
                        self.dig(cell, "dig adjacent ore ({})".format(avoid_enemy_traps))
                        return True
    def move_to_ore(self, avoid_enemy_traps=True, min_turn=0):
        """Moves toward the closest ore vein that still has ore left for this robot."""
        if TURN > min_turn:
            alive_robots = [robot for robot in MY_ROBOTS if not robot.dead]
            if avoid_enemy_traps or len(alive_robots) > 2:
                traps = get_traps(avoid_enemy_traps)
                sorted_cells = sort_cells_closest(self.position, CELLS_WITH_ORE)
                sorted_cells = list(filter(lambda x: x not in traps, sorted_cells))
                for cell in sorted_cells:
                    robot_amount = len(friendly_robots_working_this_cell(cell))
                    ore = MAP_DATA[cell]["ore"]
                    # Don't send more robots to a vein than it has ore units.
                    if avoid_enemy_traps or robot_amount == 0:
                        if robot_amount < ore:
                            self.move(cell, True, "move to ore ({})".format(avoid_enemy_traps))
                            return True
    def go_get_radar(self):
        """Heads back to base to fetch the next radar when the timing pays off."""
        if len(HARDCODED_RADARS) > 0 and not Robot.GETTING_RADAR:
            # A robot covers up to 4 cells per turn: travel time back to x=0.
            turn_to_base = math.ceil(self.x / 4)
            if turn_to_base > COOLDOWNS["RADAR"]:
                Robot.GETTING_RADAR = True
                self.getting_radar = True
                cell = (0, self.y)
                self.move(cell, False, "go get radar")
                return True
    def dig_unknown_cell(self):
        """Digs an adjacent unscanned, hole-free cell (never in the base column)."""
        adjacent_cells = get_adjacent_cells(self.position)
        for cell in adjacent_cells:
            if not MAP_DATA[cell]["hole"] and cell not in CELLS_SCANNED and cell[0] > 0:
                self.dig(cell, "dig unknown cell")
                return True
    def move_to_unknown_cell(self):
        """Moves toward the closest unscanned, hole-free cell outside the base column."""
        unknown_cells = CELLS_WITHOUT_HOLE.difference(CELLS_SCANNED)
        if len(unknown_cells) > 0:
            sorted_cells = sort_cells_closest(self.position, unknown_cells)
            for cell in sorted_cells:
                if cell[0] > 0:
                    self.move(cell, True, "move to unknown cell")
                    return True
    def wait_it_out(self):
        """Fallback action: WAIT (always succeeds)."""
        self.wait("wait it out")
        return True
    # --------------------
    # Helper Methods
    # --------------------
    def choose_which_hardcoded_radar(self):
        """
        Description:
            Find the next closest hardcoded radar for your robot
            The hardcoded radar is then removed from the list
            Two radars are compared only when on the same column
        Returns:
            tuple: Coordinates (x, y) of the hardcoded radar
        """
        found = False
        if len(HARDCODED_RADARS) > 1:
            x1, x2 = HARDCODED_RADARS[0][0], HARDCODED_RADARS[1][0]
            if x1 == x2:
                # Same column: pick the row closer to this robot.
                y1, y2 = HARDCODED_RADARS[0][1], HARDCODED_RADARS[1][1]
                diff_y1, diff_y2 = abs(self.y - y1), abs(self.y - y2)
                if diff_y2 < diff_y1:
                    cell = HARDCODED_RADARS.pop(1)
                    found = True
        if not found:
            cell = HARDCODED_RADARS.pop(0)
        return cell
    def get_closest_unoccupied_cell(self, start_cell, end_cell):
        """
        Description:
            Returns the closest adjacent cell of an "end_cell" relatively to a "start_cell"
            NOTE(review): returns None when every adjacent cell is claimed by a
            lower-indexed friendly robot -- callers should be aware of this.
        Args:
            start_cell (tuple): Coordinates (x, y) of the starting point
            end_cell (tuple): Coordinates (x, y) of the ending point
        Returns:
            tuple: Coordinates (x, y) of the closest adjacent cell
        """
        cells = get_adjacent_cells(end_cell)
        sorted_cell = sort_cells_closest(start_cell, cells)
        # Only robots with a lower index have already chosen their move this turn.
        robots = [MY_ROBOTS[i] for i in range(self.index)]
        for cell in sorted_cell:
            occupied = False
            for robot in robots:
                if robot.position == cell:
                    occupied = True
                    break
            if not occupied:
                return cell
    def guess_enemy_pickup_trap(self):
        """Guesses if an enemy has picked up a trap"""
        # An enemy idling in the base column most likely requested an item;
        # assume the worst case (a trap).
        if self.immobile and self.x == 0:
            self.item = "TRAP"
    def guess_enemy_potential_traps(self):
        """Guesses if a trap has been buried by an enemy"""
        if self.immobile and self.x > 0 and self.item == "TRAP":
            adjacent_cells = get_adjacent_cells(self.position)
            # He made a new hole, 100% sure
            for cell in adjacent_cells:
                if cell in NEW_HOLES.intersection(ENEMY_HOLES):
                    robot_count = len(adjacent_robots(cell, 1))
                    if robot_count == 1:
                        self.item = None
                        self.last_dig_cell = cell
                        ENEMY_TRAPS.add(cell)
                        return
            # If already existing holes, assume they have traps
            for cell in adjacent_cells:
                if cell in MY_HOLES.union(ENEMY_HOLES):
                    self.item = None
                    ENEMY_TRAPS.add(cell)
    def just_died(self):
        """Checks if a robot just died this turn"""
        # A dead robot is reported at (-1, -1); only react on the transition.
        if self.position == (-1, -1) and self.previous_position != self.position:
            self.dead = True
            self.dig_objective = None
            if self.getting_radar:
                # Free the shared radar-fetching slot for another robot.
                Robot.GETTING_RADAR = False
                self.getting_radar = False
        return
LocalGeometryFinder, initializes the list of coordination geometries
:param permutations_safe_override: If set to True, all permutations are tested (very time-consuming for large
coordination numbers!)
:param plane_ordering_override: If set to False, the ordering of the points in the plane is disabled
"""
self.allcg = AllCoordinationGeometries(
permutations_safe_override=permutations_safe_override,
only_symbols=only_symbols,
)
self.permutations_safe_override = permutations_safe_override
self.plane_ordering_override = plane_ordering_override
self.plane_safe_permutations = plane_safe_permutations
self.setup_parameters(
centering_type="centroid",
include_central_site_in_centroid=True,
bva_distance_scale_factor=None,
structure_refinement=self.STRUCTURE_REFINEMENT_NONE,
)
print(chemenv_citations())
def setup_parameters(
self,
centering_type="standard",
include_central_site_in_centroid=False,
bva_distance_scale_factor=None,
structure_refinement=STRUCTURE_REFINEMENT_REFINED,
spg_analyzer_options=None,
):
"""
Setup of the parameters for the coordination geometry finder. A reference point for the geometries has to be
chosen. This can be the centroid of the structure (including or excluding the atom for which the coordination
geometry is looked for) or the atom itself. In the 'standard' centering_type, the reference point is the central
atom for coordination numbers 1, 2, 3 and 4 and the centroid for coordination numbers > 4.
:param centering_type: Type of the reference point (centering) 'standard', 'centroid' or 'central_site'
:param include_central_site_in_centroid: In case centering_type is 'centroid', the central site is included if
this value is set to True.
:param bva_distance_scale_factor: Scaling factor for the bond valence analyzer (this might be different whether
the structure is an experimental one, an LDA or a GGA relaxed one, or any other relaxation scheme (where
under- or over-estimation of bond lengths is known).
:param structure_refinement: Refinement of the structure. Can be "none", "refined" or "symmetrized".
:param spg_analyzer_options: Options for the SpaceGroupAnalyzer (dictionary specifying "symprec"
and "angle_tolerance". See pymatgen's SpaceGroupAnalyzer for more information.
"""
self.centering_type = centering_type
self.include_central_site_in_centroid = include_central_site_in_centroid
if bva_distance_scale_factor is not None:
self.bva_distance_scale_factor = bva_distance_scale_factor
else:
self.bva_distance_scale_factor = self.DEFAULT_BVA_DISTANCE_SCALE_FACTOR
self.structure_refinement = structure_refinement
if spg_analyzer_options is None:
self.spg_analyzer_options = self.DEFAULT_SPG_ANALYZER_OPTIONS
else:
self.spg_analyzer_options = spg_analyzer_options
def setup_parameter(self, parameter, value):
"""
Setup of one specific parameter to the given value. The other parameters are unchanged. See setup_parameters
method for the list of possible parameters
:param parameter: Parameter to setup/update
:param value: Value of the parameter
"""
self.__dict__[parameter] = value
def setup_structure(self, structure):
"""
Sets up the structure for which the coordination geometries have to be identified. The structure is analyzed
with the space group analyzer and a refined structure is used
:param structure: A pymatgen Structure
"""
self.initial_structure = structure.copy()
if self.structure_refinement == self.STRUCTURE_REFINEMENT_NONE:
self.structure = structure.copy()
self.spg_analyzer = None
self.symmetrized_structure = None
else:
self.spg_analyzer = SpacegroupAnalyzer(
self.initial_structure,
symprec=self.spg_analyzer_options["symprec"],
angle_tolerance=self.spg_analyzer_options["angle_tolerance"],
)
if self.structure_refinement == self.STRUCTURE_REFINEMENT_REFINED:
self.structure = self.spg_analyzer.get_refined_structure()
self.symmetrized_structure = None
elif self.structure_refinement == self.STRUCTURE_REFINEMENT_SYMMETRIZED:
self.structure = self.spg_analyzer.get_refined_structure()
self.spg_analyzer_refined = SpacegroupAnalyzer(
self.structure,
symprec=self.spg_analyzer_options["symprec"],
angle_tolerance=self.spg_analyzer_options["angle_tolerance"],
)
self.symmetrized_structure = self.spg_analyzer_refined.get_symmetrized_structure()
def get_structure(self):
"""
Returns the pymatgen Structure that has been setup for the identification of geometries (the initial one
might have been refined/symmetrized using the SpaceGroupAnalyzer).
:return: The pymatgen Structure that has been setup for the identification of geometries (the initial one
might have been refined/symmetrized using the SpaceGroupAnalyzer).
"""
return self.structure
def set_structure(self, lattice, species, coords, coords_are_cartesian):
"""
Sets up the pymatgen structure for which the coordination geometries have to be identified starting from the
lattice, the species and the coordinates
:param lattice: The lattice of the structure
:param species: The species on the sites
:param coords: The coordinates of the sites
:param coords_are_cartesian: If set to True, the coordinates are given in cartesian coordinates
"""
self.setup_structure(Structure(lattice, species, coords, coords_are_cartesian))
def compute_coordination_environments(
self,
structure,
indices=None,
only_cations=True,
strategy=DEFAULT_STRATEGY,
valences="bond-valence-analysis",
initial_structure_environments=None,
):
"""
:param structure:
:param indices:
:param only_cations:
:param strategy:
:param valences:
:param initial_structure_environments:
:return:
"""
self.setup_structure(structure=structure)
if valences == "bond-valence-analysis":
bva = BVAnalyzer()
try:
vals = bva.get_valences(structure=structure)
except ValueError:
vals = "undefined"
else:
if valences == "undefined":
vals = valences
else:
if len(valences) != len(structure):
raise ValueError("Valences do not match the number of sites in the structure")
vals = valences
# TODO: add something to compute only the neighbors sets needed for the strategy.
se = self.compute_structure_environments(
only_cations=only_cations,
only_indices=indices,
valences=vals,
initial_structure_environments=initial_structure_environments,
)
lse = LightStructureEnvironments.from_structure_environments(strategy=strategy, structure_environments=se)
return lse.coordination_environments
def compute_structure_environments(
self,
excluded_atoms=None,
only_atoms=None,
only_cations=True,
only_indices=None,
maximum_distance_factor=PRESETS["DEFAULT"]["maximum_distance_factor"],
minimum_angle_factor=PRESETS["DEFAULT"]["minimum_angle_factor"],
max_cn=None,
min_cn=None,
only_symbols=None,
valences="undefined",
additional_conditions=None,
info=None,
timelimit=None,
initial_structure_environments=None,
get_from_hints=False,
voronoi_normalized_distance_tolerance=PRESETS["DEFAULT"]["voronoi_normalized_distance_tolerance"],
voronoi_normalized_angle_tolerance=PRESETS["DEFAULT"]["voronoi_normalized_angle_tolerance"],
recompute=None,
optimization=PRESETS["DEFAULT"]["optimization"],
):
"""
Computes and returns the StructureEnvironments object containing all the information about the coordination
environments in the structure
:param excluded_atoms: Atoms for which the coordination geometries does not have to be identified
:param only_atoms: If not set to None, atoms for which the coordination geometries have to be identified
:param only_cations: If set to True, will only compute environments for cations
:param only_indices: If not set to None, will only compute environments the atoms of the given indices
:param maximum_distance_factor: If not set to None, neighbors beyond
maximum_distance_factor*closest_neighbor_distance are not considered
:param minimum_angle_factor: If not set to None, neighbors for which the angle is lower than
minimum_angle_factor*largest_angle_neighbor are not considered
:param max_cn: maximum coordination number to be considered
:param min_cn: minimum coordination number to be considered
:param only_symbols: if not set to None, consider only coordination environments with the given symbols
:param valences: valences of the atoms
:param additional_conditions: additional conditions to be considered in the bonds (example : only bonds
between cation and anion
:param info: additional info about the calculation
:param timelimit: time limit (in secs) after which the calculation of the StructureEnvironments object stops
:param initial_structure_environments: initial StructureEnvironments object (most probably incomplete)
:param get_from_hints: whether to add neighbors sets from "hints" (e.g. capped environment => test the
neighbors without the cap)
:param voronoi_normalized_distance_tolerance: tolerance for the normalized distance used to distinguish
neighbors sets
:param voronoi_normalized_angle_tolerance: tolerance for the normalized angle used to distinguish
neighbors sets
:param recompute: whether to recompute the sites already computed (when initial_structure_environments
is not None)
:param optimization: optimization algorithm
:return: The StructureEnvironments object containing all the information about the coordination
environments in the structure
"""
time_init = time.process_time()
if info is None:
info = {}
info.update(
{
"local_geometry_finder": {
"parameters": {
"centering_type": self.centering_type,
"include_central_site_in_centroid": self.include_central_site_in_centroid,
"structure_refinement": self.structure_refinement,
"spg_analyzer_options": self.spg_analyzer_options,
}
}
}
)
if only_symbols is not None:
self.allcg = AllCoordinationGeometries(
permutations_safe_override=self.permutations_safe_override,
only_symbols=only_symbols,
)
if valences == "undefined":
firstsite = self.structure[0]
try:
sp = firstsite.specie
if isinstance(sp, Species):
self.valences = [int(site.specie.oxi_state) for site in self.structure]
else:
self.valences = valences
except AttributeError:
self.valences = valences
else:
self.valences = valences
# Get a list of indices of unequivalent sites from the initial structure
self.equivalent_sites = [[site] for site in self.structure]
self.struct_sites_to_irreducible_site_list_map = list(range(len(self.structure)))
self.sites_map = list(range(len(self.structure)))
indices = list(range(len(self.structure)))
# Get list of unequivalent sites with valence >= 0
if only_cations and self.valences != "undefined":
sites_indices = [isite for isite in indices if self.valences[isite] >= 0]
else:
sites_indices = list(indices)
# Include atoms that are in the list of "only_atoms" if it is provided
if only_atoms is not None:
sites_indices = [
isite
for isite in sites_indices
if any([at in [sp.symbol for sp in self.structure[isite].species] for at in only_atoms])
]
# Exclude atoms that are in the list of excluded atoms
if excluded_atoms:
sites_indices = [
isite
for isite in sites_indices
if not any([at in [sp.symbol for sp in self.structure[isite].species] for at in excluded_atoms])
]
if only_indices is not None:
sites_indices = [isite for isite in indices if isite in only_indices]
# Get the VoronoiContainer for the sites defined by their indices (sites_indices)
logging.debug("Getting DetailedVoronoiContainer")
if voronoi_normalized_distance_tolerance is None:
normalized_distance_tolerance = DetailedVoronoiContainer.default_normalized_distance_tolerance
else:
normalized_distance_tolerance = voronoi_normalized_distance_tolerance
if voronoi_normalized_angle_tolerance is None:
normalized_angle_tolerance = DetailedVoronoiContainer.default_normalized_angle_tolerance
else:
normalized_angle_tolerance = voronoi_normalized_angle_tolerance
self.detailed_voronoi = DetailedVoronoiContainer(
self.structure,
isites=sites_indices,
valences=self.valences,
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor,
additional_conditions=additional_conditions,
normalized_distance_tolerance=normalized_distance_tolerance,
normalized_angle_tolerance=normalized_angle_tolerance,
)
logging.debug("DetailedVoronoiContainer has been set up")
# Initialize the StructureEnvironments object (either from initial_structure_environments or from scratch)
if initial_structure_environments is not None:
se = initial_structure_environments
if se.structure != self.structure:
raise ValueError("Structure is not the same in initial_structure_environments")
if se.voronoi != self.detailed_voronoi:
if self.detailed_voronoi.is_close_to(se.voronoi):
self.detailed_voronoi = se.voronoi
else:
raise ValueError("Detailed Voronoi is not the same in initial_structure_environments")
se.info = info
else:
se = StructureEnvironments(
voronoi=self.detailed_voronoi,
valences=self.valences,
sites_map=self.sites_map,
equivalent_sites=self.equivalent_sites,
ce_list=[None] * len(self.structure),
structure=self.structure,
info=info,
)
# Set up the coordination numbers that have to be computed based on | |
(jbev1 - jbev2) >=0
# ineq2 = (jbev1 - jbev3) >=0
# ineq3 = (jbev2 - jbev3) >=0
# #multiplications that are needed several times (this is for
# #optimization purposes)
# multXXY = ineq1*ineq2
# multYXX = ineq2*ineq3
# #get indices for the test cases (c1 for case 1, etc.)
# #(111) -> np.where[1*1*1=1 >0]
# #c1 = np.where(multXXY * ineq3 >0)
# #we don't need the first case as there is no change
# #in the order
# #(110) -> np.where[multiply ( (1*1=1) AND (where x<1) )]
# c2 = np.where((multXXY * (ineq3<1)) >0)
# #(100) -> np.where[multiply ( (where x>0) AND (where (0*0=0)<1) )>0]
# c4 = np.where((ineq1 * (multYXX<1)) >0)
# #(011) -> np.where[multiply ( (where x<1) AND (1*1=1) )>0]
# c5 = np.where(((ineq1<1) * multYXX) >0)
# #(001) -> np.where[multiply ( (where (0*0=0)<1) AND (where x>0) )>0]
# c7 = np.where(((multXXY<1) * ineq3) >0)
# #(000) -> np.where[0*0*0=0<1]
# c8 = np.where((ineq1 * multYXX) <1)
# #handle the cases
# temp = np.zeros(np.shape(jbev1)) #temporary value saver
# tempvec = np.zeros(np.shape(jbevc1)) #temporary value saver for eigvecs
# #case 2, change order of k2, k3
# temp[c2] = jbev2[c2]
# jbev2[c2] = jbev3[c2]
# jbev3[c2] = temp[c2]
# #and now the eigenvalues
# tempvec[c2] = jbevc2[c2]
# jbevc2[c2] = jbevc3[c2]
# jbevc3[c2] = tempvec[c2]
# #case 4, change order to k3, k1, k2
# #switch k2,k3 first, then k1, k3
# temp[c4] = jbev2[c4]
# jbev2[c4] = jbev3[c4]
# jbev3[c4] = temp[c4]
# #and now the eigenvalues
# tempvec[c4] = jbevc2[c4]
# jbevc2[c4] = jbevc3[c4]
# jbevc3[c4] = tempvec[c4]
# #k1 is still in k1 position, k3 is now in k2 position
# temp[c4] = jbev1[c4]
# jbev1[c4] = jbev2[c4]
# jbev2[c4] = temp[c4]
# #and now the eigenvalues
# tempvec[c4] = jbevc1[c4]
# jbevc1[c4] = jbevc2[c4]
# jbevc2[c4] = tempvec[c4]
# #case 5, change order of k1, k2
# temp[c5] = jbev1[c5]
# jbev1[c5] = jbev2[c5]
# jbev2[c5] = temp[c5]
# #and now the eigenvalues
# tempvec[c5] = jbevc1[c5]
# jbevc1[c5] = jbevc2[c5]
# jbevc2[c5] = tempvec[c5]
# #case 7, change order to k2, k3, k1
# #switch k1,k2 first, then k1, k3
# temp[c7] = jbev1[c7]
# jbev1[c7] = jbev2[c7]
# jbev2[c7] = temp[c7]
# #and now the eigenvalues
# tempvec[c7] = jbevc1[c7]
# jbevc1[c7] = jbevc2[c7]
# jbevc2[c7] = tempvec[c7]
# #k3 is still in k3 position, k1 is now in k2 position
# temp[c7] = jbev3[c7]
# jbev3[c7] = jbev2[c7]
# jbev2[c7] = temp[c7]
# #and now the eigenvalues
# tempvec[c7] = jbevc3[c7]
# jbevc3[c7] = jbevc2[c7]
# jbevc2[c7] = tempvec[c7]
# #case 8, reverse order, i.e. k3, k2, k1. Swap k3 with k1.
# temp[c8] = jbev1[c8]
# jbev1[c8] = jbev3[c8]
# jbev3[c8] = temp[c8]
# #and now the eigenvalues
# tempvec[c8] = jbevc1[c8]
# jbevc1[c8] = jbevc3[c8]
# jbevc3[c8] = tempvec[c8]
return jbev1, jbev2, jbev3, jbevc1, jbevc2, jbevc3
def cepca_orderevals_error(kr, jbev1, jbev2, jbev3, jbevc1, jbevc2, jbevc3):
'''
Our problem is that the above eigenvalue matrix is not ordered
in a meaningful way, whereas the eigenvalues in the Pottmann07
    paper are ordered, but in an unknown way.
One can assume, that the order is given by the biggest
elements being first (this assumption is handled in the above
if case). If one does not assume this, then there's not much
left to do except using the error and value estimates given
in the paper and looking for the right combination of
    eigenvalues that fulfills these approximations.
One can then apply a method of choosing the combination with
the lowest amount of errors, for example by just summing up
all errors, averaging them and choosing the combination with
lowest average.
This is what is done here.
We have the three eigenvalues kappa1, kappa2, kappa3 from
above and either kappa1,kappa2, kappa1,kappa3, or
kappa2,kappa3 are the two eigenvalues referenced in the paper
to calculate the principal curvatures Kappa1, Kappa2. Note,
that in the above combinations the order can be switched, but
this does not lead to different results in the terms below
(except in 4.1), 4.2), but we'll deal with that by
calculating the error and averaging smart).
We know,
1) Kappa1 = 6/(pi*kr**6) * (kappa2-3*kappa1) + 8/(5*kr) ,
2) Kappa2 = 6/(pi*kr**6) * (kappa1-3*kappa2) + 8/(5*kr) ,
3) kappa2-kappa1 = pi/24 * (Kappa1-Kappa2) * kr**6 + O(kr**7),
4.1) kappa1 = (2*pi)/15 * kr**5
- (pi/48) * (3*Kappa1+Kappa2) * kr**6 + O(kr**7)
4.2) kappa2 = (2*pi)/15 * kr**5
- (pi/48) * (Kappa1+3*Kappa2) * kr**6 + O(kr**7)
4.3) kappa3 = (19*pi)/480 * kr**5
- (9*pi/512) * (Kappa1+Kappa2) * kr**6 + O(kr**7)
So, to get the error estimates, we will calculate Kappa1 and
Kappa2, i.e. 1)&2), for all three combinations and then compare
the difference of the two terms in 3), and then compare kappa1-3
to their respective terms in 4.1)-4.3).
Then, for each combination we sum up the above differences,
take the average.
Because the eigenvalues are saved in matrices and these might
be big, we won't create several matrices, but keep the ones
we have right now and change the order in the current one.
Therefore we only need one matrix to temporarily save values.
'''
error=np.zeros(np.shape(jbev1)+(3,), np.double) #contains
#error estimates
temp = np.zeros(np.shape(jbev1)) #temporary value saver for eigvals
tempvec = np.zeros(np.shape(jbevc1)) #temporary value saver for eigvecs
for i in range(0,3):
#calculate the errors for the current order, i.e. k1,k2
Kappa1 = 6/(np.pi*kr**6) * (jbev2-3*jbev1) + 8/(5*kr) #1)
Kappa2 = 6/(np.pi*kr**6) * (jbev1-3*jbev2) + 8/(5*kr) #2)
err1lft = jbev2-jbev1 #3) left side
err1rgh = np.pi/24 * (Kappa1-Kappa2) * kr**6 #3) right side
#weird formatting due to some strange spyder behavior,
#this resolves after a restart hopefully. Then I'll change
#the formatting here.
err2kap1 = (2*np.pi)/15 * kr**5\
- (np.pi/48) * (3*Kappa1+Kappa2) * kr**6
#4.1) right side
err2kap2 = (2*np.pi)/15 * kr**5\
- (np.pi/48) * (Kappa1+3*Kappa2) * kr**6
#4.2) right side
err2kap3 = (19*np.pi)/480 * kr**5 \
- (9*np.pi/512) * (Kappa1+Kappa2) * kr**6
#4.3) right side
error[:,:,:,i] = 0.25*np.abs(err1lft-err1rgh)\
+ 0.5*np.abs((jbev1+jbev2)\
-(err2kap1+err2kap2))\
+ 0.25*np.abs(jbev3-err2kap3)
#now to the swapping of eigenvalues, i.e. try a different
#combination
if i==0:
#now we swap jbev 2 with jbev3.
temp = jbev2
jbev2 = jbev3
jbev3 = temp
elif i==1:
#now we swap jbev 1 with what used to be jbev2 (now
#jbev3, position-wise)
temp = jbev1
jbev1 = jbev3
jbev3 = temp
#now we compare errors!
#first case is where the initial order k1,k2,k3 has the lowest
#error
case1 = np.where((error[:,:,:,0]-error[:,:,:,1]<=0)
* (error[:,:,:,0]-error[:,:,:,2]<=0))
#2nd case is where the order k1,k3,k2 has the lowest error
case2 = np.where((error[:,:,:,1]-error[:,:,:,0] <=0)
* (error[:,:,:,1]-error[:,:,:,2]<=0))
#3rd case is where the order k2,k3,k1 has the lowest error
case3 = np.where((error[:,:,:,2]-error[:,:,:,0]<=0)
* (error[:,:,:,2]-error[:,:,:,1] <=0))
#we need case 3 for the eigenvectors
#eigenvalue switching
#wherever case1,case2 is true, we have to switch the order
#back. In case 3, nothing has to happen.
#for case 2
temp[case2] = jbev1[case2] #current order is k2,k3,k1
jbev1[case2] = jbev3[case2] #k1,k3,k1
jbev3[case2] = temp[case2] #k1,k2,k2
#for case 1
temp[case1] = jbev1[case1] #current order is k2,k3,k1
jbev1[case1] = jbev3[case1] #k1,k3,k1
jbev3[case1] = temp[case1] #k1,k3,k2
temp[case1] = jbev2[case1]
jbev2[case1] = jbev3[case1] #k1,k2,k2
jbev3[case1] = temp[case1] #k1,k2,k3
#eigenvector switching
#wherever case2,case3 is true, we have to switch the order
#As the eigenvectors weren't rearranged yet, in case 1 nothing
#happens.
#for case 2 (we want k1,k3,k2)
tempvec[case2] = jbevc2[case2] #current order is k1,k2,k3
jbevc2[case2] = jbevc3[case2] #k1,k3,k3
jbevc3[case2] = tempvec[case2] #k1,k3,k2
#for case 3 (we want k2,k3,k1)
tempvec[case3] = jbevc2[case3] #current order is k1,k2,k3
jbevc2[case3] = jbevc3[case3] #k1,k3,k3
jbevc3[case3] = tempvec[case3] #k1,k3,k2
tempvec[case3] = jbevc1[case3]
jbevc1[case3] = jbevc3[case3] #k2,k3,k2
| |
<gh_stars>1-10
"""
Test distributed simulation.
"""
import cPickle
import glob
import hashlib
import logging
from math import pi
from multiprocessing import AuthenticationError, current_process
from multiprocessing.managers import RemoteError
import os
import shutil
import socket
import sys
import traceback
import unittest
import nose
from Crypto.Random import get_random_bytes
from openmdao.main.api import Assembly, Case, Component, Container, Driver, \
set_as_top
from openmdao.main.container import get_closest_proxy
from openmdao.main.hasobjective import HasObjectives
from openmdao.main.hasparameters import HasParameters
from openmdao.main.interfaces import IComponent
from openmdao.main.mp_support import has_interface, is_instance
from openmdao.main.mp_util import read_server_config
from openmdao.main.objserverfactory import connect, start_server, RemoteFile
from openmdao.main.rbac import Credentials, get_credentials, set_credentials, \
AccessController, RoleError, rbac
from openmdao.lib.datatypes.api import Float, Int
from openmdao.lib.casehandlers.listcaserecorder import ListCaseRecorder
from openmdao.test.execcomp import ExecComp
from openmdao.util.decorators import add_delegate
from openmdao.util.publickey import get_key_pair
from openmdao.util.testutil import assert_raises, assert_rel_error
# Used for naming classes we want to create instances of.
_MODULE = 'openmdao.main.test.test_distsim'
# Used for naming server directories.
_SERVER_ID = 0
class Box(ExecComp):
    """
    Simple component for testing.

    Exposes ``width``/``height``/``depth`` inputs and
    ``surface_area``/``volume`` outputs via the :class:`ExecComp`
    expressions below, plus the hosting process id so tests can verify
    that the component really executed in a server process.
    """

    # Process id of the process hosting this component (output trait).
    pid = Int(iotype='out')

    def __init__(self):
        super(Box, self).__init__([
            'surface_area = (width*(height+depth) + depth*height)*2',
            'volume = width*height*depth'])
        self.pid = os.getpid()
        # For get_closest_proxy().
        sub = self.add('subcontainer', Container())
        sub.add('subvar', Int())

    def execute(self):
        # Echo inputs plus host/pid so the log shows where execution happened.
        print 'Box.execute(), %f %f %f on %s:%d' \
              % (self.width, self.height, self.depth,
                 socket.gethostname(), self.pid)
        sys.stdout.flush()
        super(Box, self).execute()

    def no_rbac(self):
        # Deliberately undecorated: used by tests to check that methods
        # without @rbac cannot be invoked through a remote proxy.
        pass

    @rbac('owner', proxy_types=[RemoteFile])
    def open_in_parent(self, path, mode):
        """
        Upcall: ask the (possibly remote) parent to open `path`.
        Exceptions are logged and swallowed; returns None on failure.
        """
        try:
            return self.parent.open(path, mode)
        except Exception as exc:
            self._logger.debug('open_in_parent() caught %s:', exc)
            self._logger.debug(traceback.format_exc())

    @rbac('owner')
    def cause_parent_error1(self):
        """ Access a nonexistent parent attribute; used to provoke a
        server-side AttributeError in the tests. """
        return self.parent.no_such_variable

    @rbac('owner')
    def cause_parent_error2(self):
        """ Call a parent method the proxy is expected to reject
        (see test_2_model's expected message). """
        return self.parent.get_trait('no-such-trait')

    @rbac('owner')
    def cause_parent_error3(self):
        """ Call a parent method restricted to a different rbac role. """
        return self.parent.xyzzy()
class HollowSphere(Component):
    """
    Simple component for testing.

    Computes surface area and volumes of a hollow sphere from its
    inner `radius` and shell `thickness`.
    """

    radius = Float(1.0, low=0., exclude_low=True, iotype='in', units='cm')
    thickness = Float(0.05, iotype='in', units='cm')
    inner_volume = Float(iotype='out', units='cm**3')
    volume = Float(iotype='out', units='cm**3')
    solid_volume = Float(iotype='out', units='cm**3')
    surface_area = Float(iotype='out', units='cm**2')
    pid = Int(iotype='out')

    def __init__(self, doc=None, directory=''):
        super(HollowSphere, self).__init__(doc, directory)
        # Record the hosting process so tests can check remote execution.
        self.pid = os.getpid()

    def execute(self):
        """ Evaluate geometry from the current inputs. """
        r = self.radius
        outer = r + self.thickness
        self.surface_area = 4.0 * pi * r * r
        self.inner_volume = 4.0 / 3.0 * pi * r ** 3
        self.volume = 4.0 / 3.0 * pi * outer ** 3
        self.solid_volume = self.volume - self.inner_volume
@add_delegate(HasParameters)
@add_delegate(HasObjectives)
class BoxDriver(Driver):
    """ Just drives :class:`Box` inputs and records results. """

    def __init__(self):
        super(BoxDriver, self).__init__()
        # Keep every evaluated case in memory for later inspection by tests.
        self.recorders = [ListCaseRecorder()]

    def execute(self):
        """ Runs with various box parameter values. """
        # Sweep a small grid: 1 width x 2 heights x 3 depths = 6 cases,
        # matching the verification loops in test_2_model.
        for width in range(1, 2):
            for height in range(1, 3):
                for depth in range(1, 4):
                    self._logger.debug('w,h,d %s, %s, %s', width, height, depth)
                    self.set_parameters((width, height, depth))
                    self.workflow.run()
                    volume, area = self.eval_objectives()
                    self._logger.debug(' v,a %s, %s', volume, area)
                    case = Case()
                    case.inputs = [('width', None, width),
                                   ('height', None, height),
                                   ('depth', None, depth)]
                    case.outputs = [('volume', None, volume),
                                    ('area', None, area),
                                    ('pid', None, self.parent.box.pid)]
                    # Just to show access to remote from driver.
                    for recorder in self.recorders:
                        recorder.record(case)
class BoxSource(ExecComp):
    """ Just a pass-through for :class:`BoxDriver` input values. """

    def __init__(self):
        # Each *_out output simply mirrors the matching *_in input.
        exprs = ['%s_out = %s_in' % (name, name)
                 for name in ('width', 'height', 'depth')]
        super(BoxSource, self).__init__(exprs)
        # For get_closest_proxy().
        child = self.add('subcontainer', Container())
        child.add('subvar', Int())
class BoxSink(ExecComp):
    """ Just a pass-through for :class:`BoxDriver` result values. """

    def __init__(self):
        # Mirror the box results so the driver's objectives can read them.
        expressions = ['volume_out = volume_in',
                       'area_out = area_in']
        super(BoxSink, self).__init__(expressions)
class Model(Assembly):
    """ Drive a remote :class:`Box` via connections to local components. """

    def __init__(self, box):
        super(Model, self).__init__()
        # Workflow: source -> box (possibly a remote proxy) -> sink,
        # executed by BoxDriver.
        self.add('driver', BoxDriver())
        self.driver.workflow.add(self.add('source', BoxSource()).name)
        self.driver.workflow.add(self.add('box', box).name)
        self.driver.workflow.add(self.add('sink', BoxSink()).name)
        # The driver varies the source inputs...
        self.driver.add_parameter('source.width_in', low=1e-99, high=1e99)
        self.driver.add_parameter('source.height_in', low=1e-99, high=1e99)
        self.driver.add_parameter('source.depth_in', low=1e-99, high=1e99)
        # ...which feed the box...
        self.connect('source.width_out', 'box.width')
        self.connect('source.height_out', 'box.height')
        self.connect('source.depth_out', 'box.depth')
        # ...whose outputs are captured by the sink and read as objectives.
        self.connect('box.volume', 'sink.volume_in')
        self.connect('box.surface_area', 'sink.area_in')
        self.driver.add_objective('sink.volume_out')
        self.driver.add_objective('sink.area_out')

    @rbac('owner', proxy_types=[RemoteFile])
    def open(self, path, mode):
        """ Return opened file, wrapped for remote access. """
        return RemoteFile(open(path, mode))

    @rbac('xyzzy')
    def xyzzy(self):
        """ No access by 'owner', etc. — used to provoke role errors. """
        return None
class Protector(AccessController):
    """ Special :class:`AccessController` to protect secrets. """

    def check_access(self, role, methodname, obj, attr):
        """
        Grant 'owner' everything; other roles may touch only the normal
        user-visible attributes, and never delete anything.
        Raises :class:`RoleError` when access is denied.
        """
        if not role:
            raise RoleError('No access by null role')
        if role != 'owner':
            permitted = (methodname != '__delattr__'
                         and self.user_attribute(obj, attr))
            if not permitted:
                raise RoleError("No %s access to '%s' by role '%s'"
                                % (methodname, attr, role))

    @staticmethod
    def user_attribute(obj, attr):
        """ Return True if `attr` is part of the normal user-visible API. """
        return (attr in obj.list_inputs()
                or attr in obj.list_outputs()
                or attr in ('parent', 'name'))
class ProtectedBox(Box):
    """
    Box which can be used but the innards are hidden.

    A :class:`Protector` access controller limits non-owner roles to the
    normal user-visible attributes; anything else (including `secret`)
    is refused with :class:`RoleError`.
    """

    # Hidden from non-owner roles by Protector.
    secret = Int()

    def __init__(self):
        super(ProtectedBox, self).__init__()
        # Protector will use current credentials as 'owner'.
        self.protector = Protector()

    @rbac('owner')
    def proprietary_method(self):
        """ Only the 'owner' role may invoke this. """
        pass

    def get_access_controller(self):
        """ Return the :class:`AccessController` guarding this object. """
        return self.protector

    @rbac(('owner', 'user'))
    def get(self, path, index=None):
        """ Return the value of `path` if it is user-visible. """
        if self.protector.user_attribute(self, path):
            return super(ProtectedBox, self).get(path, index)
        # Bug fix: previously raised with undefined names `attr`/`role`,
        # producing NameError instead of the intended RoleError.
        raise RoleError("No get access to '%s'" % path)

    @rbac(('owner', 'user'))
    def get_dyn_trait(self, name, iotype=None):
        """ Return the trait for `name` if it is user-visible. """
        if self.protector.user_attribute(self, name):
            return super(ProtectedBox, self).get_dyn_trait(name, iotype)
        raise RoleError("No get access to '%s'" % name)

    @rbac(('owner', 'user'))
    def get_wrapped_attr(self, name):
        """ Return the wrapped attribute for `name` if it is user-visible. """
        if self.protector.user_attribute(self, name):
            return super(ProtectedBox, self).get_wrapped_attr(name)
        raise RoleError("No get_wrapped_attr access to '%s'" % name)

    @rbac(('owner', 'user'))
    def set(self, path, value, index=None, srcname=None, force=False):
        """ Set the value of `path` if it is user-visible. """
        if self.protector.user_attribute(self, path):
            return super(ProtectedBox, self).set(path, value, index,
                                                 srcname, force)
        raise RoleError("No set access to '%s'" % path)
class TestCase(unittest.TestCase):
""" Test distributed simulation. """
def run(self, result=None):
"""
Record the :class:`TestResult` used so we can conditionally cleanup
directories in :meth:`tearDown`.
"""
self.test_result = result or unittest.TestResult()
return super(TestCase, self).run(self.test_result)
def setUp(self):
""" Called before each test. """
self.n_errors = len(self.test_result.errors)
self.n_failures = len(self.test_result.failures)
self.factories = []
self.servers = []
self.server_dirs = []
# Ensure we control directory cleanup.
self.keepdirs = os.environ.get('OPENMDAO_KEEPDIRS', '0')
os.environ['OPENMDAO_KEEPDIRS'] = '1'
    def start_factory(self, port=None, allowed_users=None):
        """
        Start each factory process in a unique directory.

        port: optional server port. When None, alternates between -1 and 0
            so both transport families get exercised.
        allowed_users: optional {user: public_key} map; defaults to just
            the current credentials.

        Returns the connected factory proxy (also kept in self.factories).
        """
        global _SERVER_ID
        _SERVER_ID += 1
        server_dir = 'Factory_%d' % _SERVER_ID
        # Start from a clean per-server directory.
        if os.path.exists(server_dir):
            shutil.rmtree(server_dir)
        os.mkdir(server_dir)
        os.chdir(server_dir)
        self.server_dirs.append(server_dir)
        try:
            logging.debug('')
            logging.debug('tester pid: %s', os.getpid())
            logging.debug('starting server...')
            if port is None:
                # Exercise both AF_INET and AF_UNIX/AF_PIPE.
                port = -1 if _SERVER_ID & 1 else 0
            if allowed_users is None:
                credentials = get_credentials()
                allowed_users = {credentials.user: credentials.public_key}
            allowed_types = ['openmdao.main.test.test_distsim.HollowSphere',
                             'openmdao.main.test.test_distsim.Box',
                             'openmdao.main.test.test_distsim.ProtectedBox']
            server, server_cfg = start_server(port=port,
                                              allowed_users=allowed_users,
                                              allowed_types=allowed_types)
            self.servers.append(server)
            # Connection details are written to a config file by the server.
            cfg = read_server_config(server_cfg)
            self.address = cfg['address']
            self.port = cfg['port']
            self.tunnel = cfg['tunnel']
            self.key = cfg['key']
            logging.debug('server pid: %s', server.pid)
            logging.debug('server address: %s', self.address)
            logging.debug('server port: %s', self.port)
            logging.debug('server key: %s', self.key)
        finally:
            # Always return to the original directory, even if startup failed.
            os.chdir('..')
        factory = connect(self.address, self.port, self.tunnel, pubkey=self.key)
        self.factories.append(factory)
        logging.debug('factory: %r', factory)
        return factory
    def tearDown(self):
        """ Shut down server process. """
        try:
            for factory in self.factories:
                factory.cleanup()
            for server in self.servers:
                logging.debug('terminating server pid %s', server.pid)
                server.terminate(timeout=10)
            # Cleanup only if there weren't any new errors or failures.
            if len(self.test_result.errors) == self.n_errors and \
               len(self.test_result.failures) == self.n_failures and \
               not int(self.keepdirs):
                for server_dir in self.server_dirs:
                    shutil.rmtree(server_dir)
        finally:
            # Restore the user's original OPENMDAO_KEEPDIRS setting.
            os.environ['OPENMDAO_KEEPDIRS'] = self.keepdirs
    def test_1_client(self):
        """ Exercise remote get()/set(), attribute access, and rbac checks. """
        logging.debug('')
        logging.debug('test_client')
        factory = self.start_factory()
        # List available types.
        types = factory.get_available_types()
        logging.debug('Available types:')
        for typname, version in types:
            logging.debug('   %s %s', typname, version)
        # First a HollowSphere, accessed via get()/set().
        obj = factory.create(_MODULE+'.HollowSphere')
        sphere_pid = obj.get('pid')
        # Proves the instance lives in the server process, not this one.
        self.assertNotEqual(sphere_pid, os.getpid())
        radius = obj.get('radius')
        self.assertEqual(radius, 1.)
        radius += 1
        obj.set('radius', radius)
        new_radius = obj.get('radius')
        self.assertEqual(new_radius, 2.)
        # Outputs remain at their defaults until run().
        self.assertEqual(obj.get('inner_volume'), 0.)
        self.assertEqual(obj.get('volume'), 0.)
        self.assertEqual(obj.get('solid_volume'), 0.)
        self.assertEqual(obj.get('surface_area'), 0.)
        obj.run()
        assert_rel_error(self, obj.get('inner_volume'), 33.510321638, 0.000001)
        assert_rel_error(self, obj.get('volume'), 36.086951213, 0.000001)
        assert_rel_error(self, obj.get('solid_volume'), 2.5766295747, 0.000001)
        assert_rel_error(self, obj.get('surface_area'), 50.265482457, 0.000001)
        msg = ": Variable 'radius' must be a float in the range (0.0, "
        assert_raises(self, "obj.set('radius', -1)", globals(), locals(),
                      ValueError, msg)
        # Now a Box, accessed via attribute methods.
        obj = factory.create(_MODULE+'.Box')
        box_pid = obj.get('pid')
        self.assertNotEqual(box_pid, os.getpid())
        self.assertNotEqual(box_pid, sphere_pid)
        obj.width += 2
        obj.height += 2
        obj.depth += 2
        self.assertEqual(obj.width, 2.)
        self.assertEqual(obj.height, 2.)
        self.assertEqual(obj.depth, 2.)
        self.assertEqual(obj.volume, 0.)
        self.assertEqual(obj.surface_area, 0.)
        obj.run()
        self.assertEqual(obj.volume, 8.0)
        self.assertEqual(obj.surface_area, 24.0)
        # Methods without @rbac must not be callable through the proxy.
        try:
            obj.no_rbac()
        except RemoteError as exc:
            msg = "AttributeError: method 'no_rbac' of"
            logging.debug('msg: %s', msg)
            logging.debug('exc: %s', exc)
            self.assertTrue(msg in str(exc))
        else:
            self.fail('Expected RemoteError')
    def test_2_model(self):
        """ Run a Model whose Box lives in a remote server process. """
        logging.debug('')
        logging.debug('test_model')
        factory = self.start_factory()
        # Create model and run it.
        box = factory.create(_MODULE+'.Box')
        model = set_as_top(Model(box))
        model.run()
        # Check results: same grid as BoxDriver.execute(), cases in order.
        for width in range(1, 2):
            for height in range(1, 3):
                for depth in range(1, 4):
                    case = model.driver.recorders[0].cases.pop(0)
                    self.assertEqual(case.outputs[0][2], width*height*depth)
        self.assertTrue(is_instance(model.box.parent, Assembly))
        self.assertTrue(has_interface(model.box.parent, IComponent))
        # Upcall to use parent to resolve sibling.
        # At one time this caused proxy problems.
        source = model.box.parent.source
        self.assertEqual(source.width_in, 1.)
        # Proxy resolution.
        obj, path = get_closest_proxy(model, 'box.subcontainer.subvar')
        self.assertEqual(obj, model.box)
        self.assertEqual(path, 'subcontainer.subvar')
        obj, path = get_closest_proxy(model, 'source.subcontainer.subvar')
        self.assertEqual(obj, model.source.subcontainer)
        self.assertEqual(path, 'subvar')
        obj, path = get_closest_proxy(model.source.subcontainer, 'subvar')
        self.assertEqual(obj, model.source.subcontainer)
        self.assertEqual(path, 'subvar')
        # Observable proxied type.
        tmp = model.box.open_in_parent('tmp', 'w')
        tmp.close()
        os.remove('tmp')
        # Cause server-side errors we can see.
        try:
            box.cause_parent_error1()
        except RemoteError as exc:
            msg = "AttributeError: attribute 'no_such_variable' of"
            logging.debug('msg: %s', msg)
            logging.debug('exc: %s', exc)
            self.assertTrue(msg in str(exc))
        else:
            self.fail('Expected RemoteError')
        try:
            box.cause_parent_error2()
        except RemoteError as exc:
            msg = "AttributeError: method 'get_trait' of"
            logging.debug('msg: %s', msg)
            logging.debug('exc: %s', exc)
            self.assertTrue(msg in str(exc))
        else:
            self.fail('Expected RemoteError')
| |
<filename>cdp/network.py
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Network
from __future__ import annotations
from cdp.util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import debugger
from . import emulation
from . import io
from . import page
from . import runtime
from . import security
from deprecated.sphinx import deprecated # type: ignore
class ResourceType(enum.Enum):
    r'''
    Resource type as it was perceived by the rendering engine.
    '''
    DOCUMENT = "Document"
    STYLESHEET = "Stylesheet"
    IMAGE = "Image"
    MEDIA = "Media"
    FONT = "Font"
    SCRIPT = "Script"
    TEXT_TRACK = "TextTrack"
    XHR = "XHR"
    FETCH = "Fetch"
    EVENT_SOURCE = "EventSource"
    WEB_SOCKET = "WebSocket"
    MANIFEST = "Manifest"
    SIGNED_EXCHANGE = "SignedExchange"
    PING = "Ping"
    CSP_VIOLATION_REPORT = "CSPViolationReport"
    PREFLIGHT = "Preflight"
    OTHER = "Other"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ResourceType:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
class LoaderId(str):
    r'''
    Unique loader identifier.
    '''
    def to_json(self) -> str:
        '''Serialize to the wire representation (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> LoaderId:
        '''Build a LoaderId from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'LoaderId({super().__repr__()})'
class RequestId(str):
    r'''
    Unique request identifier.
    '''
    def to_json(self) -> str:
        '''Serialize to the wire representation (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> RequestId:
        '''Build a RequestId from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'RequestId({super().__repr__()})'
class InterceptionId(str):
    r'''
    Unique intercepted request identifier.
    '''
    def to_json(self) -> str:
        '''Serialize to the wire representation (the string itself).'''
        return self

    @classmethod
    def from_json(cls, json: str) -> InterceptionId:
        '''Build an InterceptionId from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'InterceptionId({super().__repr__()})'
class ErrorReason(enum.Enum):
    r'''
    Network level fetch failure reason.
    '''
    FAILED = "Failed"
    ABORTED = "Aborted"
    TIMED_OUT = "TimedOut"
    ACCESS_DENIED = "AccessDenied"
    CONNECTION_CLOSED = "ConnectionClosed"
    CONNECTION_RESET = "ConnectionReset"
    CONNECTION_REFUSED = "ConnectionRefused"
    CONNECTION_ABORTED = "ConnectionAborted"
    CONNECTION_FAILED = "ConnectionFailed"
    NAME_NOT_RESOLVED = "NameNotResolved"
    INTERNET_DISCONNECTED = "InternetDisconnected"
    ADDRESS_UNREACHABLE = "AddressUnreachable"
    BLOCKED_BY_CLIENT = "BlockedByClient"
    BLOCKED_BY_RESPONSE = "BlockedByResponse"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ErrorReason:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
class TimeSinceEpoch(float):
    r'''
    UTC time in seconds, counted from January 1, 1970.
    '''
    def to_json(self) -> float:
        '''Serialize to the wire representation (the float itself).'''
        return self

    @classmethod
    def from_json(cls, json: float) -> TimeSinceEpoch:
        '''Build a TimeSinceEpoch from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'TimeSinceEpoch({super().__repr__()})'
class MonotonicTime(float):
    r'''
    Monotonically increasing time in seconds since an arbitrary point in the past.
    '''
    def to_json(self) -> float:
        '''Serialize to the wire representation (the float itself).'''
        return self

    @classmethod
    def from_json(cls, json: float) -> MonotonicTime:
        '''Build a MonotonicTime from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'MonotonicTime({super().__repr__()})'
class Headers(dict):
    r'''
    Request / response headers as keys / values of JSON object.
    '''
    def to_json(self) -> dict:
        '''Serialize to the wire representation (the dict itself).'''
        return self

    @classmethod
    def from_json(cls, json: dict) -> Headers:
        '''Build a Headers mapping from its wire representation.'''
        return cls(json)

    def __repr__(self):
        return f'Headers({super().__repr__()})'
class ConnectionType(enum.Enum):
    r'''
    The underlying connection technology that the browser is supposedly using.
    '''
    NONE = "none"
    CELLULAR2G = "cellular2g"
    CELLULAR3G = "cellular3g"
    CELLULAR4G = "cellular4g"
    BLUETOOTH = "bluetooth"
    ETHERNET = "ethernet"
    WIFI = "wifi"
    WIMAX = "wimax"
    OTHER = "other"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ConnectionType:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
class CookieSameSite(enum.Enum):
    r'''
    Represents the cookie's 'SameSite' status:
    https://tools.ietf.org/html/draft-west-first-party-cookies
    '''
    STRICT = "Strict"
    LAX = "Lax"
    NONE = "None"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> CookieSameSite:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
class CookiePriority(enum.Enum):
    r'''
    Represents the cookie's 'Priority' status:
    https://tools.ietf.org/html/draft-west-cookie-priority-00
    '''
    LOW = "Low"
    MEDIUM = "Medium"
    HIGH = "High"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> CookiePriority:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
class CookieSourceScheme(enum.Enum):
    r'''
    Represents the source scheme of the origin that originally set the cookie.
    A value of "Unset" allows protocol clients to emulate legacy cookie scope for the scheme.
    This is a temporary ability and it will be removed in the future.
    '''
    UNSET = "Unset"
    NON_SECURE = "NonSecure"
    SECURE = "Secure"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> CookieSourceScheme:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
@dataclass
class ResourceTiming:
    r'''
    Timing information for the request.
    '''
    #: Timing's requestTime is a baseline in seconds, while the other numbers are ticks in
    #: milliseconds relatively to this requestTime.
    request_time: float
    #: Started resolving proxy.
    proxy_start: float
    #: Finished resolving proxy.
    proxy_end: float
    #: Started DNS address resolve.
    dns_start: float
    #: Finished DNS address resolve.
    dns_end: float
    #: Started connecting to the remote host.
    connect_start: float
    #: Connected to the remote host.
    connect_end: float
    #: Started SSL handshake.
    ssl_start: float
    #: Finished SSL handshake.
    ssl_end: float
    #: Started running ServiceWorker.
    worker_start: float
    #: Finished Starting ServiceWorker.
    worker_ready: float
    #: Started fetch event.
    worker_fetch_start: float
    #: Settled fetch event respondWith promise.
    worker_respond_with_settled: float
    #: Started sending request.
    send_start: float
    #: Finished sending request.
    send_end: float
    #: Time the server started pushing request.
    push_start: float
    #: Time the server finished pushing request.
    push_end: float
    #: Finished receiving response headers.
    receive_headers_end: float

    # Wire-key <-> attribute pairs shared by both converters. Order matches
    # the CDP field order so to_json() emits keys in the original order.
    # (Unannotated, so @dataclass does not treat it as a field.)
    _FIELDS = (
        ('requestTime', 'request_time'),
        ('proxyStart', 'proxy_start'),
        ('proxyEnd', 'proxy_end'),
        ('dnsStart', 'dns_start'),
        ('dnsEnd', 'dns_end'),
        ('connectStart', 'connect_start'),
        ('connectEnd', 'connect_end'),
        ('sslStart', 'ssl_start'),
        ('sslEnd', 'ssl_end'),
        ('workerStart', 'worker_start'),
        ('workerReady', 'worker_ready'),
        ('workerFetchStart', 'worker_fetch_start'),
        ('workerRespondWithSettled', 'worker_respond_with_settled'),
        ('sendStart', 'send_start'),
        ('sendEnd', 'send_end'),
        ('pushStart', 'push_start'),
        ('pushEnd', 'push_end'),
        ('receiveHeadersEnd', 'receive_headers_end'),
    )

    def to_json(self) -> T_JSON_DICT:
        '''Serialize to a CDP-style JSON dict.'''
        return {key: getattr(self, attr) for key, attr in self._FIELDS}

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> ResourceTiming:
        '''Deserialize from a CDP-style JSON dict.'''
        return cls(**{attr: float(json[key]) for key, attr in cls._FIELDS})
class ResourcePriority(enum.Enum):
    r'''
    Loading priority of a resource request.
    '''
    VERY_LOW = "VeryLow"
    LOW = "Low"
    MEDIUM = "Medium"
    HIGH = "High"
    VERY_HIGH = "VeryHigh"

    def to_json(self) -> str:
        '''Serialize to the raw CDP wire value.'''
        return self.value

    @classmethod
    def from_json(cls, json: str) -> ResourcePriority:
        '''Deserialize from the raw CDP wire value.'''
        return cls(json)
@dataclass
class PostDataEntry:
    r'''
    Post data entry for HTTP request
    '''
    #: Entry content (CDP ``bytes`` field), if present.
    bytes_: typing.Optional[str] = None

    def to_json(self) -> T_JSON_DICT:
        '''Serialize to a CDP-style JSON dict, omitting absent fields.'''
        payload: T_JSON_DICT = {}
        if self.bytes_ is not None:
            payload['bytes'] = self.bytes_
        return payload

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> PostDataEntry:
        '''Deserialize from a CDP-style JSON dict.'''
        if 'bytes' in json:
            return cls(bytes_=str(json['bytes']))
        return cls(bytes_=None)
@dataclass
class Request:
    r'''
    HTTP request data.
    '''
    #: Request URL (without fragment).
    url: str
    #: HTTP request method.
    method: str
    #: HTTP request headers.
    headers: Headers
    #: Priority of the resource request at the time request is sent.
    initial_priority: ResourcePriority
    #: The referrer policy of the request, as defined in https://www.w3.org/TR/referrer-policy/
    referrer_policy: str
    #: Fragment of the requested URL starting with hash, if present.
    url_fragment: typing.Optional[str] = None
    #: HTTP POST request data.
    post_data: typing.Optional[str] = None
    #: True when the request has POST data. Note that postData might still be omitted when this flag is true when the data is too long.
    has_post_data: typing.Optional[bool] = None
    #: Request body elements. This will be converted from base64 to binary
    post_data_entries: typing.Optional[typing.List[PostDataEntry]] = None
    #: The mixed content type of the request.
    mixed_content_type: typing.Optional[security.MixedContentType] = None
    #: Whether is loaded via link preload.
    is_link_preload: typing.Optional[bool] = None
    #: Set for requests when the TrustToken API is used. Contains the parameters
    #: passed by the developer (e.g. via "fetch") as understood by the backend.
    #: (TrustTokenParams is declared later in this module; resolved lazily via
    #: ``from __future__ import annotations``.)
    trust_token_params: typing.Optional[TrustTokenParams] = None
    #: True if this resource request is considered to be the 'same site' as the
    #: request corresponding to the main frame.
    is_same_site: typing.Optional[bool] = None

    def to_json(self) -> T_JSON_DICT:
        '''Serialize to a CDP-style JSON dict; optional fields are omitted
        when ``None``.'''
        json: T_JSON_DICT = dict()
        json['url'] = self.url
        json['method'] = self.method
        json['headers'] = self.headers.to_json()
        json['initialPriority'] = self.initial_priority.to_json()
        json['referrerPolicy'] = self.referrer_policy
        if self.url_fragment is not None:
            json['urlFragment'] = self.url_fragment
        if self.post_data is not None:
            json['postData'] = self.post_data
        if self.has_post_data is not None:
            json['hasPostData'] = self.has_post_data
        if self.post_data_entries is not None:
            json['postDataEntries'] = [i.to_json() for i in self.post_data_entries]
        if self.mixed_content_type is not None:
            json['mixedContentType'] = self.mixed_content_type.to_json()
        if self.is_link_preload is not None:
            json['isLinkPreload'] = self.is_link_preload
        if self.trust_token_params is not None:
            json['trustTokenParams'] = self.trust_token_params.to_json()
        if self.is_same_site is not None:
            json['isSameSite'] = self.is_same_site
        return json

    @classmethod
    def from_json(cls, json: T_JSON_DICT) -> Request:
        '''Deserialize from a CDP-style JSON dict; absent optional keys
        become ``None``.'''
        return cls(
            url=str(json['url']),
            method=str(json['method']),
            headers=Headers.from_json(json['headers']),
            initial_priority=ResourcePriority.from_json(json['initialPriority']),
            referrer_policy=str(json['referrerPolicy']),
            url_fragment=str(json['urlFragment']) if 'urlFragment' in json else None,
            post_data=str(json['postData']) if 'postData' in json else None,
            has_post_data=bool(json['hasPostData']) if 'hasPostData' in json else None,
            post_data_entries=[PostDataEntry.from_json(i) for i in json['postDataEntries']] if 'postDataEntries' in json else None,
            mixed_content_type=security.MixedContentType.from_json(json['mixedContentType']) if 'mixedContentType' in json else None,
            is_link_preload=bool(json['isLinkPreload']) if 'isLinkPreload' in json else None,
            trust_token_params=TrustTokenParams.from_json(json['trustTokenParams']) if 'trustTokenParams' in json else None,
            is_same_site=bool(json['isSameSite']) if 'isSameSite' in json else None,
        )
@dataclass
class SignedCertificateTimestamp:
r'''
Details of a signed certificate timestamp (SCT).
| |
##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Cinesite VFX Ltd. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferUITest
import IECore
import unittest
import GafferUI.SpreadsheetUI._ClipboardAlgo as _ClipboardAlgo
class SpreadsheetUITest( GafferUITest.TestCase ) :
    @staticmethod
    def __createSpreadsheet( numRows = 10 ) :
        """Return a Gaffer.Spreadsheet pre-populated with deterministic test data.

        Column/row layout is described by the comments below; tests rely on
        these exact values and enabled states.
        """
        s = Gaffer.Spreadsheet()
        rowsPlug = s["rows"]
        # N = row number, starting at 1
        # Rows named 'rowN'
        # Column 0 - string - 'sN'
        # Column 1 - int - N
        # Column 2 - int - 10N - even rows disabled
        # Column 3 - float - 100N
        # Column 4 - int - 1000N
        # Column 5 - compound plug
        # Column 6 - Non-adopted NameValuePlug
        # Column 7 - Adopted NameValuePlug
        compoundPlug = Gaffer.ValuePlug()
        compoundPlug["a"] = Gaffer.FloatPlug()
        compoundPlug["b"] = Gaffer.StringPlug()
        nameValuePlug = Gaffer.NameValuePlug( "nvp", IECore.FloatData( 0 ), True )
        for i, columnPlug in enumerate( (
            Gaffer.StringPlug(), # 0
            Gaffer.IntPlug(), # 1
            Gaffer.IntPlug(), # 2
            Gaffer.FloatPlug(), # 3
            Gaffer.IntPlug(), # 4
            compoundPlug, # 5
            nameValuePlug # 6
        ) ) :
            rowsPlug.addColumn( columnPlug, "column%d" % i, adoptEnabledPlug = False )
        # Column 7 adopts the NameValuePlug's own "enabled" plug.
        rowsPlug.addColumn( nameValuePlug, "column7", adoptEnabledPlug = True )
        for i in range( 1, numRows + 1 ) :
            rowsPlug.addRow()["name"].setValue( "row%d" % i )
            rowsPlug[i]["cells"][0]["value"].setValue( "s%d" % ( i ) )
            # Even-numbered rows have column 2 disabled (i % 2 == 0).
            rowsPlug[i]["cells"][2]["enabled"].setValue( i % 2 )
            for c in range( 1, 5 ) :
                rowsPlug[i]["cells"][c]["value"].setValue( i * pow( 10, c - 1 ) )
            rowsPlug[i]["cells"][5]["value"]["a"].setValue( i * 0.1 )
            rowsPlug[i]["cells"][5]["value"]["b"].setValue( "string %f" % ( i * 0.1 ) )
            rowsPlug[i]["cells"][6]["value"]["value"].setValue( i * 0.01 )
        return s
# Provides a way to easily check the resulting value hierarchy under a cell plug.
@staticmethod
def __cellPlugHashes( cellPlugMatrix ) :
return [ [ c.hash() for c in row ] for row in cellPlugMatrix ]
    def testCellPlugMatrix( self ) :
        """createPlugMatrixFromCells must return a row-major matrix sorted by
        row then column, regardless of the order cells are supplied in."""
        s = self.__createSpreadsheet()
        # Degenerate cases: empty selection, and a single cell.
        self.assertEqual( _ClipboardAlgo.createPlugMatrixFromCells( [] ), [] )
        self.assertEqual(
            _ClipboardAlgo.createPlugMatrixFromCells( [ s["rows"][1]["cells"][2] ] ),
            [ [ s["rows"][1]["cells"][2] ] ]
        )
        # Build a deliberately out-of-order selection by rotating the
        # column order for each row.
        columns = [ 2, 0, 3 ]
        rows = ( 0, 4, 2, 5 )
        plugs = []
        for r in rows :
            for c in columns :
                plugs.append( s["rows"][r]["cells"][c] )
            columns.append( columns.pop( 0 ) )
        # The result must come back sorted by row and by column.
        expected = [ [ s["rows"][r]["cells"][c] for c in sorted(columns) ] for r in sorted(rows) ]
        self.assertEqual( _ClipboardAlgo.createPlugMatrixFromCells( plugs ), expected )
    def testCanCopyPlugs( self ) :
        """canCopyPlugs accepts rectangular selections with type-compatible
        columns, and rejects empty, ragged or mixed-type selections."""
        s = self.__createSpreadsheet()
        self.assertFalse( _ClipboardAlgo.canCopyPlugs( [] ) )
        self.assertFalse( _ClipboardAlgo.canCopyPlugs( [ [] ] ) )
        # Single cell
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0] ]
        ] ) )
        # Two rows (contiguous)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0] ],
            [ s["rows"][2]["cells"][0] ]
        ] ) )
        # Three rows (non-contiguous)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0] ],
            [ s["rows"][3]["cells"][0] ],
            [ s["rows"][5]["cells"][0] ]
        ] ) )
        # Two columns (contiguous)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0], s["rows"][1]["cells"][1] ]
        ] ) )
        # Three columns (non-contiguous)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0], s["rows"][1]["cells"][1], s["rows"][1]["cells"][3] ]
        ] ) )
        # Three rows, two columns (non-contiguous)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0], s["rows"][1]["cells"][2] ],
            [ s["rows"][3]["cells"][0], s["rows"][3]["cells"][2] ],
            [ s["rows"][4]["cells"][0], s["rows"][4]["cells"][2] ],
        ] ) )
        # Non-contiguous but compatible column types (all int columns)
        self.assertTrue( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][1], s["rows"][1]["cells"][4] ],
            [ s["rows"][4]["cells"][2], s["rows"][4]["cells"][4] ],
        ] ) )
        # Mixed column types (string vs int) must be rejected
        self.assertFalse( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0], s["rows"][1]["cells"][1] ],
            [ s["rows"][4]["cells"][1], s["rows"][4]["cells"][2] ],
        ] ) )
        # Inconsistent column counts (ragged rows) must be rejected
        self.assertFalse( _ClipboardAlgo.canCopyPlugs( [
            [ s["rows"][1]["cells"][0], s["rows"][1]["cells"][1] ],
            [ s["rows"][4]["cells"][1] ]
        ] ) )
    def testIsValueMatrix( self ) :
        """isValueMatrix accepts only an ObjectVector of row ObjectVectors of
        CompoundData cells with consistent keys and value types per column."""
        # Short aliases for the IECore types used throughout.
        O = IECore.ObjectVector
        C = IECore.CompoundData
        B = IECore.BoolData
        I = IECore.IntData
        F = IECore.FloatData
        # Invalid inputs.
        for d in (
            "cat",
            1,
            None,
            [],
            [ 1, 2, 3 ],
            [ [ 1 ] ],
            [ [ 1, 4 ] ],
            # Incorrect cell data type
            O([
                O([ O([ I( 1 ), I( 2 ) ]) ])
            ]),
            # Mixed row value types
            O([
                O([ C({ "enabled" : B( True ), "value" : I( 2 ) }) ]),
                O([ C({ "enabled" : B( True ), "value" : F( 2 ) }) ])
            ]),
            # Mixed row keys
            O([
                O([ C({ "enabled" : B( True ), "value" : I( 2 ) }) ]),
                O([ C({ "znabled" : B( True ), "value" : I( 2 ) }) ])
            ]),
        ) :
            self.assertFalse( _ClipboardAlgo.isValueMatrix( d ) )
        # Valid matrices of various shapes.
        for d in (
            # one row, one column
            O([ O([ C({ "enabled" : B( True ), "value" : I( 1 ) }) ]) ]),
            # one row, two columns
            O([ O([ C({ "enabled" : B( True ), "value" : I( 2 ) }), C({ "enabled" : B( True ), "value" : F( 2 ) }) ]) ]),
            # two rows, one column
            O([
                O([ C({ "enabled" : B( True ), "value" : I( 3 ) }) ]),
                O([ C({ "enabled" : B( True ), "value" : I( 3 ) }) ])
            ]),
            # two rows, two columns
            O([
                O([ C({ "enabled" : B( True ), "value" : I( 4 ) }), C({ "enabled" : B( False ), "value" : F( 4 ) }) ]),
                O([ C({ "enabled" : B( True ), "value" : I( 4 ) }), C({ "enabled" : B( True ), "value" : F( 4 ) }) ])
            ]),
        ) :
            self.assertTrue( _ClipboardAlgo.isValueMatrix( d ) )
    def testValueMatrix( self ) :
        """valueMatrix must capture each cell's enabled state and value, and
        its result must itself satisfy isValueMatrix."""
        s = self.__createSpreadsheet()
        plugs = [
            [
                s["rows"][1]["cells"][0], s["rows"][1]["cells"][1], s["rows"][1]["cells"][2],
                s["rows"][1]["cells"][3], s["rows"][1]["cells"][4]
            ],
            [
                s["rows"][2]["cells"][0], s["rows"][2]["cells"][1], s["rows"][2]["cells"][2],
                s["rows"][2]["cells"][3], s["rows"][2]["cells"][4]
            ]
        ]
        # Expected data mirrors the plugs' current enabled/value states.
        expected = IECore.ObjectVector( [
            IECore.ObjectVector( [
                IECore.CompoundData( { "enabled" : c["enabled"].getValue(), "value" : c["value"].getValue() } ) for c in r
            ] ) for r in plugs
        ] )
        data = _ClipboardAlgo.valueMatrix( plugs )
        self.assertTrue( _ClipboardAlgo.isValueMatrix( data ) )
        self.assertEqual( data, expected )
        # Test interleaved compatible (int) columns
        plugs = [ [ s["rows"][r]["cells"][ ( r % 2 ) + 1 ] ] for r in ( 1, 2 ) ]
        expected = IECore.ObjectVector( [
            IECore.ObjectVector( [
                IECore.CompoundData( {
                    "enabled" : s["rows"][r]["cells"][ ( r % 2 ) + 1 ]["enabled"].getValue(),
                    "value" : s["rows"][r]["cells"][ ( r % 2 ) + 1 ]["value"].getValue()
                } )
            ] ) for r in ( 1, 2 )
        ] )
        self.assertEqual( _ClipboardAlgo.valueMatrix( plugs ), expected )
def testCanPasteCells( self ) :
s = self.__createSpreadsheet()
# Single Column
plugs = [ [ s["rows"][r]["cells"][1] ] for r in ( 1, 2 ) ]
data = _ClipboardAlgo.valueMatrix( plugs )
# Bad data
self.assertFalse( _ClipboardAlgo.canPasteCells( "I'm a duck", plugs ) )
self.assertTrue( _ClipboardAlgo.canPasteCells( data, plugs ) )
# - fewer rows
self.assertTrue( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][4]["cells"][1] ] ] ) )
# - row wrap with more rows
self.assertTrue( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][1] ] for r in range( 1, 5 ) ] ) )
# - different column, same type
self.assertTrue( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][2] ] for r in range( 1, 5 ) ] ) )
# - different columns, same type
self.assertTrue( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][ ( r % 2 ) + 1 ] ] for r in range( 1, 5 ) ] ) )
# - invalid column type
self.assertFalse( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][0] ] for r in range( 1, 5 ) ] ) )
# - different columns, one invalid type
self.assertFalse( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][ ( r % 2 ) ] ] for r in range( 1, 5 ) ] ) )
# - column wrap with multiple valid target columns
self.assertTrue( _ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][c] for c in ( 1, 2, 4 ) ] for r in range( 1, 5 ) ] ) )
# Multiple Columns
plugs = [ [ s["rows"][r]["cells"][c] for c in range( 3 ) ] for r in ( 1, 2 ) ]
data = _ClipboardAlgo.valueMatrix( plugs )
self.assertTrue( _ClipboardAlgo.canPasteCells( data, plugs ) )
# - fewer rows
self.assertTrue(
_ClipboardAlgo.canPasteCells( data, [ [ s["rows"][4]["cells"][c] for c in range( 3 ) ] ] )
)
# - row wrap with more rows
self.assertTrue(
_ClipboardAlgo.canPasteCells( data, [ [ s["rows"][r]["cells"][c] for c in range( 3 ) ] for r in range( 1, 3 ) ] )
)
# | |
mode
newordering = []
newsortcol = None
to_track = (self.prevgroup != "")
for oo in self.ordering:
# oo is from interval [-inf;-1] [1;inf]
# it must be converted to column index, sign remebered and then applied afterwards
sign = oo > 0
oo = oo - 1 if sign else - 1 - oo
rr = GroupTrack.translateorder(to_track, oo)
if rr < 0:
continue
#if newsortcol == None: # the first ordering is the most recent
newsortcol = rr + 1 if sign else - 1 - oo
newordering.append(newsortcol)
# backup is currently useful only for grouptracks (switching from track)
if to_track: # to track - save backup
self.ordbackup = None
if not self.ordering:
self.ordbackup = 1
elif abs(self.ordering[-1]) == 1:
self.ordbackup = self.ordering[-1]
else: # to grouptrack - restore backup
if self.ordbackup != None:
newsortcol = self.ordbackup
newordering.append(newsortcol)
self.ordbackup = None
self.ordering = newordering
return newsortcol
@classmethod
def removeissues(cls, rule):
"replace spaces, non-alphanumeric characters and some keywords with percent signs"
# FIXME - this is sluggish
rule = rule.lower().strip()
rule = rule.replace("!a:", "ARTISTARTISTARTISTARTISTARTISTARTIST")
rule = rule.replace("!n:", "NAMENAMENAMENAMENAMENAME")
rule = rule.replace("!r:>", "RATINGMORERATINGMORERATINGMORERATINGMORERATINGMORERATINGMORE")
rule = rule.replace("!r:<", "RATINGLESSRATINGLESSRATINGLESSRATINGLESSRATINGLESSRATINGLESS")
rule = rule.replace("!r:", "RATINGRATINGRATINGRATINGRATINGRATING")
rule = rule.replace("!*:", "NEWNEWNEWNEWNEWNEW")
rule = rule.replace("!g:", "GROUPGROUPGROUPGROUP")
for rr in cls.issues:
rule = rule.replace(rr, "%")
rule = rule.translate(TrackModel.trtable)
for rr in cls.issuesmagic:
rule = rule.replace(rr, "%")
rule = rule.replace("ARTISTARTISTARTISTARTISTARTISTARTIST", "!a:")
rule = rule.replace("NAMENAMENAMENAMENAMENAME", "!n:")
rule = rule.replace("RATINGMORERATINGMORERATINGMORERATINGMORERATINGMORERATINGMORE", "!r:>")
rule = rule.replace("RATINGLESSRATINGLESSRATINGLESSRATINGLESSRATINGLESSRATINGLESS", "!r:<")
rule = rule.replace("RATINGRATINGRATINGRATINGRATINGRATING", "!r:")
rule = rule.replace("NEWNEWNEWNEWNEWNEW","!*:")
rule = rule.replace("GROUPGROUPGROUPGROUP", "!g:")
while "%%" in rule:
rule = rule.replace("%%", "%")
# replacement from the future :)
#import re
#rep = {"condition1": "", "condition2": "text"} # define desired replacements here
## use these three lines to do the replacement
#rep = dict((re.escape(k), v) for k, v in rep.iteritems())
#pattern = re.compile("|".join(rep.keys()))
#text = pattern.sub(lambda m: rep[m.group(0)], text)
return rule
    def buildquery(self, query, rule):
        """
        Convert a string rule into filters applied to an sqlalchemy query.

        The rule may be simple (plain substring matched against artist/name)
        or advanced, using prefixes: !a:artist !n:name !r:rating !r:>n
        !r:<n !*:new !g:group.  Returns the filtered query.
        """
        q = query # for convenience
        rule = self.removeissues(rule)
        m = ""
        ar = {}
        # Split on "!" and classify each fragment by its prefix; fragments
        # with no recognised prefix are re-attached to the previous key.
        for s in rule.split("!"):
            cut = 2
            if s.startswith("a:"):
                m = "artist"
            elif s.startswith("n:"):
                m = "name"
            elif s.startswith("r:<"):
                m = "rating<"
                cut=3
            elif s.startswith("r:>"):
                m = "rating>"
                cut=3
            elif s.startswith("r:"):
                m = "rating"
            elif s.startswith("*:"):
                m = "new"
            elif s.startswith("g:"):
                m = "group"
            else:
                # No prefix: restore the "!" that split() consumed.
                ar[m] = ar.get(m, "") + "!" + s
                continue
            ar[m] = s[cut:].strip()
        # analyze the results a little: if no advanced key was found,
        # treat the whole rule as a simple substring search.
        for rr in ["artist", "name", "rating", "rating>", "rating<", "new", "group"]:
            if rr in ar:
                break
        else: # this is not an advanced rule
            ar = None
        if ar != None and "new" in ar:
            # Interpret the !*: value as a boolean ("" counts as true).
            ar["new"] = ( ar["new"].lower() in ["","yes","true","t", "y"] )
        commonrule = None
        if ar == None:
            commonrule = '%'+rule+'%'
        else:
            # Leftover unprefixed text still matches artist/name.
            if "" in ar and ar[""] != "!":
                commonrule = '%'+ar[""]+'%'
            if "artist" in ar:
                q = q.filter(Track.artist.ilike("%" + ar["artist"] + "%"))
            if "name" in ar:
                q = q.filter(Track.name.ilike("%" + ar["name"] + "%"))
            if "rating" in ar:
                q = q.filter(Track.rating == ar["rating"])
            if "rating<" in ar:
                q = q.filter(Track.rating < ar["rating<"])
            if "rating>" in ar:
                q = q.filter(Track.rating > ar["rating>"])
            if "new" in ar:
                q = q.filter(Track.new == ar["new"])
            if "group" in ar:
                like = "%" + ar["group"].strip() + "%"
                if self.gtmode:
                    # aliases are needed if gtmode
                    ag = sqlalchemy.orm.aliased(Group)
                    agt = sqlalchemy.orm.aliased(GroupTrack)
                    q = q.join(agt, GroupTrack.trackid==Track.idno)
                    q = q.join(ag, Group.idno==GroupTrack.groupid)
                    q = q.filter(Track.idno == agt.trackid)
                    q = q.filter(agt.groupid == ag.idno)
                    q = q.filter(ag.name.ilike(like))
                else:
                    q = q.join(GroupTrack).join(Group)
                    q = q.filter(GroupTrack.groupid==Group.idno)
                    q = q.filter(Group.name.ilike(like))
        if commonrule:
            q = q.filter(sqlalchemy.or_(Track.artist.ilike(commonrule), Track.name.ilike(commonrule)))
        return q
def filter(self, rule, maxt):
"""just query tracks by the given (possibly advanced) rule"""
self._checksession()
q = self.session.query(Track)
q = self.buildquery(q, rule)
q.order_by(Track.artist).order_by(Track.name)
if not maxt:
return q.all()
return q[0:maxt-1]
def datacount(self):
""" a safe wrapper around len(self.lst) """
if self.lst == None:
return 0
return len(self.lst)
def data(self, row, col, edit=False):
'''
return the entry @ row, col
'''
if self.lst == None:
return None
if row < 0 or row >= len(self.lst):
return None
if self.gtmode:
if col < 0 or col >= len(GroupTrack.headers):
return None
else:
if col < 0 or col >= len(Track.headers):
return None
return self.lst[row].bycol(col, edit=edit)
def tip(self, row, col):
'''
return the tooltip @ row, col
'''
if self.lst == None:
return None
if row < 0 or row >= len(self.lst):
return None
if self.gtmode:
if col < 0 or col >= len(GroupTrack.headers):
return None
else:
if col < 0 or col >= len(Track.headers):
return None
return self.lst[row].tipbycol(col)
    def setdata(self, row, col, value):
        '''
        Set the value at (row, col) and persist it.

        Returns True on success, False when the indices are out of range
        or the underlying setter rejects the value.
        '''
        if self.lst == None:
            return False
        if row < 0 or row >= len(self.lst):
            return False
        if self.gtmode:
            if col < 0 or col >= len(GroupTrack.headers):
                return False
        else:
            if col < 0 or col >= len(Track.headers):
                return False
        t = self.lst[row]
        # Remove from statistics before mutating and re-add afterwards so
        # the aggregates stay consistent with the new value.
        self.stats.remove(t)
        ok = t.bycol(col, value)
        self.stats.add(t)
        # add to session
        self._checksession()
        self.session.add(t)
        if self.gtmode:
            # Group-track rows wrap a Track that must be persisted too.
            self.session.add(t.track)
        self.session.commit()
        return ok
def headercount(self):
if self.gtmode:
if GroupTrack.headers is None:
return 0
return len(GroupTrack.headers)
if Track.headers is None:
return 0
return len(Track.headers)
def header(self, col):
if self.gtmode:
if col < 0 or col >= len(GroupTrack.headers):
return None
return GroupTrack.headers[col]
if col < 0 or col >= len(Track.headers):
return None
return Track.headers[col]
def isStar(self, col):
if self.gtmode:
return GroupTrack.isStar(col)
else:
return Track.isStar(col)
def isCheck(self, col):
if self.gtmode:
return GroupTrack.isCheck(col)
else:
return Track.isCheck(col)
    def insertrows(self, position, rows):
        """ only used to create new rows (would work properly for "add to group" imo)

        Inserts `rows` empty Track rows starting at `position`, attaching
        them to the current group when in grouptrack mode, and commits.
        """
        if self.lst == None:
            self.lst = []
        self._checksession()
        if self.gtmode:
            # New rows must be linked to the group we are currently viewing.
            grp = self.session.query(Group).filter(Group.name==self.prevgroup).one()
            self.session.add(grp)
            for row in range(position, position+rows):
                nt = Track("","",True,0)
                ngt = GroupTrack(int(row) + 1, 0, 0)
                ngt.track = nt
                grp.grouptracks.append(ngt)
                self.lst.insert(row, ngt)
                self.stats.add(nt)
                self.session.add(nt)
                self.session.add(ngt)
        else:
            for row in range(position, position+rows):
                nt = Track("","",True,0)
                self.stats.add(nt)
                self.lst.insert(row, nt)
                self.session.add(nt)
        self.session.commit()
def removerowslist(self,rows):
''' remove the rows given by the list '''
if self.lst == None:
return None
if rows == None or type(rows) != list or len(rows) == 0:
return None
assert() # this method is probably broken
self._checksession()
for row in reversed(rows):
dt = self.lst[row]
if type(dt) is Track:
self.stats.remove(dt)
print("Deleting", dt.menucaption())
else:
self.stats.remove(dt, t=False)
self.session.delete(dt)
del(self.lst[row])
self.session.commit()
    def removerows(self, position, rows, trackstoo):
        '''
        remove rows given by starting position and number of rows

        In grouptrack mode `trackstoo` chooses between deleting the
        underlying Track as well, or merely unlinking it from the group.
        '''
        if self.lst == None:
            return None
        self._checksession()
        # Iterate backwards so deletions don't shift the pending indices.
        for row in range(position+rows-1, position-1, -1):
            dt = self.lst[row]
            t = True # track will be deleted (not "unlinked")
            if type(dt) is Track:
                print("Deleting", dt.menucaption())
            else: # type(dt) is GroupTrack:
                if trackstoo:
                    print("Deleting", dt.track.menucaption())
                    self.session.delete(dt.track)
                else:
                    t = False
            self.stats.remove(dt, t=t)
            self.session.delete(dt)
            del(self.lst[row])
        self.session.commit()
def rating(self, rows, rr):
if self.lst == None:
return
for row in rows:
tt = self.lst[row]
if self.gtmode:
tt = tt.track
self.stats.remove(tt)
tt.rating = rr
self.stats.add(tt)
self.session.add(tt)
self.session.commit()
def toggleNew(self, rows):
if self.lst == None:
return
for row in rows:
tt = self.lst[row]
if self.gtmode:
tt = tt.track
self.stats.remove(tt)
tt.new = not tt.new
self.stats.add(tt)
self.session.add(tt)
self.session.commit()
def mergerows(self, torow, row):
'''
merge two tracks into one
the track given by row will be deleted, all links to it will be redirected to torow track
returns the new index of the merged row (which might have changed)
'''
if self.lst == None:
return -1
#if self.gtmode:
# return -1
self._checksession()
tdel = self.lst[row]
tt = tgt = self.lst[torow]
if self.gtmode:
gc = tdel.group
tt = tt.track
tdel = tdel.track
# relink grouptracks to the merged track
# it cant be done directly, because any change to gtdel will also change tdel.grouptracks
lgt = []
for gtdel in tdel.grouptracks:
lgt.append(gtdel)
for gtdel in lgt:
# statrefr - update statistics for this track (remove and add)
statrefr = | |
<gh_stars>10-100
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache License 2.0
'''
Created on Sep 4, 2015
@author: <NAME> (<EMAIL>)
This has examples of creating VS
'''
import argparse
from copy import deepcopy
import json
import math
import requests
from requests.auth import HTTPBasicAuth
from string import Template
import logging
from avi.sdk.avi_api import ApiSession
log = logging.getLogger(__name__)
# Realtime metrics / full client-log collection applied to every test VS.
ANALYTICS_POLICY = {
    'metrics_realtime_update': {
        'enabled': True,
        'duration': 0,
    },
    'full_client_logs': {
        'enabled': True,
        'duration': 0,
    }
}
# Payload serialised into the 'avi_proxy' marathon label on each template.
VS_CFG = {
    "virtualservice": {'analytics_policy': ANALYTICS_POLICY,
                       'application_profile_ref': '/api/applicationprofile?name=System-HTTP'}
}
# CPU share requested per container.
APP_CPU = 0.2
# Template: plain bridged nginx backend app.
DEFAULT_APP = {
    "id": "bridged-webapp",
    "cmd": "service nginx start && /usr/sbin/sshd -D",
    #"cmd": "service nginx start",
    "cpus": APP_CPU,
    "mem": 32.0,
    "instances": 2,
    "labels": {"avi_proxy": json.dumps(VS_CFG)},
    "container": {
        "type": "DOCKER",
        "docker": {
            "image": "avinetworks/server",
            "forcePullImage": True,
            #"image": "nginx",
            "network": "BRIDGE",
            "portMappings": [
                {"containerPort": 80, "hostPort": 0, "servicePort": 0,
                 "protocol": "tcp"}
            ]
        }
    },
    "healthChecks": [
        {"protocol": "HTTP", "portIndex": 0, "path": "/",
         "gracePeriodSeconds": 10, "intervalSeconds": 20,
         "maxConsecutiveFailures": 3}
    ]
}
# Template: same backend but with more memory and no forced image pull.
AVI_SERVER = {
    "id": "bridged-webapp",
    "cmd": "service nginx start && /usr/sbin/sshd -D",
    #"cmd": "service nginx start",
    "cpus": APP_CPU,
    "mem": 64.0,
    "instances": 2,
    "labels": {"avi_proxy": json.dumps(VS_CFG)},
    "container": {
        "type": "DOCKER",
        "docker": {
            "image": "avinetworks/server",
            "network": "BRIDGE",
            "portMappings": [
                {"containerPort": 80, "hostPort": 0, "servicePort": 0,
                 "protocol": "tcp"}
            ]
        }
    },
    "healthChecks": [
        {"protocol": "HTTP", "portIndex": 0, "path": "/",
         "gracePeriodSeconds": 10, "intervalSeconds": 20,
         "maxConsecutiveFailures": 3}
    ]
}
# Template: single-instance client container on a fixed service port.
DEFAULT_CLIENT = {
    "id": "test-client",
    #"cmd": "service nginx start; service ssh start; sleep 30000000",
    "cmd": "service nginx start && /usr/sbin/sshd -D",
    "cpus": APP_CPU,
    "mem": 64.0,
    "instances": 1,
    "labels": {"avi_proxy": json.dumps(VS_CFG)},
    "container": {
        "type": "DOCKER",
        "docker": {
            "image": "avinetworks/server",
            "network": "BRIDGE",
            "portMappings": [
                {"containerPort": 80, "hostPort": 0, "servicePort": 19994,
                 "protocol": "tcp"}
            ]
        }
    },
    "healthChecks": [
        {"protocol": "HTTP", "portIndex": 0, "path": "/",
         "gracePeriodSeconds": 5, "intervalSeconds": 20,
         "maxConsecutiveFailures": 3}
    ]
}
class MesosTestUtils(object):
    '''
    Utilities for managing marathon apps.  Currently implements:
    1. Creation
    2. Deletion
    '''
    # Default headers sent with every marathon REST call.
    MARATHON_HDRS = {'Content-Type': 'application/json', "Accept": "application/json"}
    # app_type -> marathon app template used by createApp().
    MARATHON_APP_TEMPLATES = {
        'default': DEFAULT_APP,
        'floating': DEFAULT_APP,
        'test-client': DEFAULT_CLIENT,
        'avi-server': AVI_SERVER}
    # Registry host used when composing the SE image location.
    DOCKER_REGISTRY = '10.128.7.253'
    def __init__(self):
        pass
def mesosCloudObj(self, marathon_ip, fleet_endpoint, sefolder, ew_subnet):
mesos_confg_obj = {
"mesos_url": "http://%s:5050" % marathon_ip,
"se_volume": "/opt/avi",
# "disable_se_repository_push": false,
"use_bridge_ip_as_vip": True,
# "use_marathon_se_deployment": false,
# "disable_auto_frontend_service_sync": false,
# "disable_auto_backend_service_sync": false,
# "feproxy_bridge_name": "cbr1",
'marathon_configurations': [
{"marathon_url": "http://%s:8080" % marathon_ip}],
# "container_port_match_http_service": true,
"fleet_endpoint": "http://%s:4444" % fleet_endpoint,
"docker_registry_se": {
"registry": "%s:5000/%s" % (self.DOCKER_REGISTRY, sefolder)},
"east_west_placement_subnet": {"ip_addr": {
"type": "V4",
"addr": ew_subnet},
"mask": 24},
"use_container_ip_port": True
# "prefer_static_routes": false,
# "mtu": 1500, "apic_mode": false,
# "enable_vip_static_routes": false
}
return mesos_confg_obj
def createApp(self, marathon_url, app_type, app_name, num_apps,
num_instances=None, northsouth=0, vips=None,
virtualservice=None, pool=None,
auth_type=None, auth_token=None, username=None, password=<PASSWORD>,
ns_service_port=None, ew_service_port_start_index=None,
num_service_ports=1, constraints=None,
cpus=None, mem=None, tenant=None, no_healthcheck=False):
if virtualservice is None:
virtualservice = {}
if pool is None:
pool = {}
marathon_uri = marathon_url + '/v2/apps'
app_ids = []
print('type', app_type, 'name', app_name, 'instances', num_instances)
print('service_port count', num_service_ports, 'ns', northsouth, 'vip', vips)
for index in range(num_apps):
app_id = (app_name + '-' + str(index + 1)
if num_apps > 1 else app_name)
app_obj = self.MARATHON_APP_TEMPLATES[app_type]
app_obj = deepcopy(app_obj)
if no_healthcheck:
app_obj.pop('healthChecks')
if num_instances:
if num_instances < 0:
app_obj['instances'] = index % 3 + 1
else:
app_obj['instances'] = num_instances
elif num_instances == 0:
app_obj['instances'] = 0
# else None: not set so use the default
if cpus:
app_obj['cpus'] = cpus
if mem:
app_obj['mem'] = mem
app_obj['id'] = app_id
app_ids.append(app_id)
avi_proxy_json = app_obj['labels']['avi_proxy']
print(' proxy json-', avi_proxy_json)
avi_proxy = json.loads(avi_proxy_json)
if tenant:
avi_proxy['tenant'] = tenant
if virtualservice:
if 'virtualservice' not in avi_proxy:
avi_proxy['virtualservice'] = virtualservice
else:
for k, v in virtualservice.items():
avi_proxy['virtualservice'][k] = v
if northsouth and vips and (index % math.ceil(float(num_apps)/northsouth) == 0):
app_obj['labels']['FE-Proxy'] = 'Yes'
if app_type == 'floating':
avi_proxy['virtualservice']['auto_allocate_floating_ip'] = True
avi_proxy['virtualservice']['auto_allocate_ip'] = True
else:
ns_index = int(index / (num_apps/northsouth))
app_obj['labels']['FE-Proxy-VIP'] = vips[ns_index]
# add services same as service port
avi_proxy['virtualservice']['services'] = \
[{'port': int(ns_service_port)}]
if pool:
if 'pool' not in avi_proxy:
avi_proxy['pool'] = pool
else:
for k, v in pool.items():
avi_proxy['pool'][k] = v
app_obj['labels']['avi_proxy'] = json.dumps(avi_proxy)
port_mapping_template = {'containerPort': 80, 'hostPort': 0, 'servicePort': 0, 'protocol': 'tcp'}
for service_port_counter in range(num_service_ports):
if service_port_counter > 0:
port_mapping = deepcopy(port_mapping_template)
app_obj['container']['docker']['portMappings'].append(port_mapping)
#if service_port and not northsouth:
if ew_service_port_start_index and not app_obj['labels'].get('FE-Proxy'):
app_obj['container']['docker']['portMappings'][service_port_counter]['servicePort'] = \
int(ew_service_port_start_index) + (index*num_service_ports) + service_port_counter
if constraints:
app_obj['constraints'] = []
for constraint in constraints:
app_obj['constraints'].append(constraint)
print('constraints:', app_obj['constraints'])
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
rsp = requests.post(marathon_uri, data=json.dumps(app_obj),
auth = auth, headers=headers)
if rsp.status_code == 409:
print('got response %s, retrying with force=true' %rsp.text)
marathon_uri = marathon_uri + '?force=true'
rsp = requests.post(marathon_uri, data=json.dumps(app_obj),
auth = auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to create app %s; got response code %s: %s' %(app_id, str(rsp.status_code), rsp.text))
print('created app', app_id, app_obj, ' response ', rsp.text)
return app_ids
def getInfo(self, marathon_uri, auth_type=None, auth_token=None, username=None, password=<PASSWORD>):
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
rsp = requests.get(marathon_uri, auth=auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to get ' + marathon_uri + ', got response code' + str(rsp.status_code) + ': '+ rsp.text)
print('response: ', rsp.text)
info = json.loads(rsp.text) if rsp.text else {}
log.debug('info %s', info)
return info
def getAppInfo(self, marathon_url, app_id,
auth_type=None, auth_token=None, username=None, password=None):
marathon_uri = marathon_url + '/v2/apps'
marathon_uri += '/' + app_id
return self.getInfo(marathon_uri, auth_type=auth_type, auth_token=auth_token, username=username, password=password)
def getAppInfos(self, marathon_url,
auth_type=None, auth_token=None, username=None, password=None):
marathon_uri = marathon_url + '/v2/apps'
return self.getInfo(marathon_uri, auth_type=auth_type, auth_token=auth_token, username=username, password=password)
def updateAppConfig(self, marathon_url, app_id,
auth_type=None, auth_token=None, username=None, password=<PASSWORD>,
**kwargs):
app_obj = self.getAppInfo(marathon_url, app_id, auth_type, auth_token, username, password)
app_obj = app_obj['app']
# see https://github.com/mesosphere/marathon/issues/3054
# could also do it on uri to be forwards compatible rather than backwards
app_obj.pop('fetch', None)
for k, v in kwargs.items():
app_obj[k] = v
del app_obj['version']
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
marathon_uri = marathon_url + '/v2/apps/' + app_id + '?force=true'
rsp = requests.put(marathon_uri, data=json.dumps(app_obj),
auth=auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to update app config, got response code' + str(rsp.status_code) + ': '+ rsp.text)
print('updated app', app_id, ' response ', rsp.text)
return rsp
def updateAviProxy(self, marathon_url, app_id, avi_proxy,
auth_type=None, auth_token=None, username=None, password=None):
app_obj = self.getAppInfo(marathon_url, app_id, auth_type, auth_token, username, password)
app_obj = app_obj['app']
# see https://github.com/mesosphere/marathon/issues/3054
# could also do it on uri to be forwards compatible rather than backwards
app_obj.pop('fetch', None)
app_obj['labels']['avi_proxy'] = json.dumps(avi_proxy)
del app_obj['version']
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
marathon_uri = marathon_url + '/v2/apps/' + app_id + '?force=true'
rsp = requests.put(marathon_uri, data=json.dumps(app_obj),
auth=auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to update app avi proxy, got response code' + str(rsp.status_code) + ': '+ rsp.text)
print('updated app', app_id, ' response ', rsp.text)
return rsp
def updateApp(self, marathon_url, app_id, vs_obj=None,
auth_type=None, auth_token=None, username=None, password=<PASSWORD>,
avi_version=None, **kwargs):
app_obj = self.getAppInfo(marathon_url, app_id, auth_type, auth_token, username, password)
app_obj = app_obj['app']
# see https://github.com/mesosphere/marathon/issues/3054
# could also do it on uri to be forwards compatible rather than backwards
app_obj.pop('fetch', None)
avi_proxy = json.loads(app_obj['labels']['avi_proxy'])
if not vs_obj:
vs_cfg = avi_proxy.get('virtualservice')
else:
vs_cfg = vs_obj
for k, v in kwargs.items():
vs_cfg[k] = v
if not 'labels' in app_obj:
app_obj['labels'] = {}
if avi_version:
avi_proxy['version'] = avi_version
avi_proxy['virtualservice'] = vs_cfg
app_obj['labels']['avi_proxy']= json.dumps(avi_proxy)
del app_obj['version']
marathon_uri = marathon_url + '/v2/apps/' + app_id + '?force=true'
log.info('uri %s app %s', marathon_uri, app_obj)
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
rsp = requests.put(marathon_uri, data=json.dumps(app_obj),
auth=auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to update app, got response code' + str(rsp.status_code) + ': '+ rsp.text)
print('updated app', app_id, ' response ', rsp.text)
return app_obj
def restartApp(self, marathon_url, app_id,
auth_type=None, auth_token=None, username=None, password=<PASSWORD>):
marathon_uri = marathon_url + '/v2/apps/' + app_id + '/restart'
headers = self.MARATHON_HDRS
auth = None
if auth_type == 'token':
headers.update({'Authorization': 'token=%s'%str(auth_token)})
elif auth_type == 'basic':
auth=HTTPBasicAuth(username, password)
rsp = requests.post(marathon_uri, auth=auth, headers=headers)
if rsp.status_code >= 300:
raise RuntimeError('failed to restart app, got response code' + str(rsp.status_code) + ': '+ rsp.text)
print('restarted app', app_id, ' rsp ', rsp.text)
def deleteApp(self, marathon_url, | |
snorkel snorkeled snorkeling snorkels snort snorted snorting
snorts snot snots snotted snotting snout snouted snouting snouts
snowball snowballed snowballing snowballs snowdrift snowdrifts snowed
snowfall snowfalls snowflake snowflakes snowier snowiest snowing
snowplow snowplowed snowplowing snowplows snows snowstorm snowstorms
snowy snub snubbed snubbing snubs snuff snuffed snuffer snuffing
snuffs snug snugged snugger snuggest snugging snuggle snuggled
snuggles snuggling snugly snugs soak soaked soaking soaks soaped
soapier soapiest soaping soaps soapy soar soared soaring soars sob
sobbed sobbing sobered soberer soberest sobering sobers sobriety sobs
soccer sociable sociables socialists socials sociological sociologist
sociologists sociology socked socking soda sodas sodded sodden sodding
sodium sodomy sods sofa sofas softball softballs soften softened
softening softens softer softest softly softness soggier soggiest
soggy soiled soiling soils sojourn sojourned sojourning sojourns
solace solaced solaces solacing solder soldered soldering solders
soldiered soldiering soled solemn solemner solemnest solemnity
solemnly solicit solicited soliciting solicitous solicits solidarity
solider solidest solidified solidifies solidify solidifying solidity
solidly solids soling solitaire solitaires solitaries solitary
solitude soloed soloing soloist soloists solos soluble solubles
solvent solvents somber somebodies someday someones somersault
somersaulted somersaulting somersaults somethings somewhats somewheres
sonata sonatas sonic sonics sonnet sonnets sonorous soot soothe
soothed soother soothes soothest soothing sootier sootiest sooty sop
sophistication sophistry sophomore sophomores sopped sopping soprano
sopranos sops sorcerer sorcerers sorceress sorceresses sorcery sored
sorely sorer sores sorest soring sororities sorority sorrier sorriest
sorrow sorrowed sorrowful sorrowing sorrows sorta souffle souffles
sounder soundest soundly soundproof soundproofed soundproofing
soundproofs souped souping soups sour sourced sourcing soured sourer
sourest souring sours southeast southeastern southerlies southerly
southerner southerners southerns southpaw southpaws southward
southwest southwestern souvenir souvenirs sovereign sovereigns
sovereignty sow sowed sowing sown sows spa spacecraft spacecrafts
spaceship spaceships spacial spacious spade spaded spades spading
spaghetti spangle spangled spangles spangling spaniel spanielled
spanielling spaniels spank spanked spanking spankings spanks spanned
spanner spanners spanning spans spar spared sparer sparest sparing
spark sparked sparking sparkle sparkled sparkler sparklers sparkles
sparkling sparks sparred sparrer sparring sparrow sparrows spars
sparse sparsely sparser sparsest spas spasm spasmed spasming spasmodic
spasms spat spate spats spatted spatter spattered spattering spatters
spatting spatula spatulas spawn spawned spawning spawns spay spayed
spaying spays spear speared spearhead spearheaded spearheading
spearheads spearing spearmint spears specialer specialists specials
specifics specifier specimens specious speck specked specking specks
spectacle spectacles spectacularly spectaculars spectator spectators
spectra speculated speculates speculating speculations speculative
speculator speculators speeched speeching speechless speedboat
speedboats speedier speediest speedometer speedometers speedy
spellbind spellbinding spellbinds spellbound speller spendthrift
spendthrifts sperm sperms spew spewed spewing spews spheres spherical
sphinx sphinxes spice spiced spices spicier spiciest spicing spicy
spider spiders spied spigots spiked spikes spiking spilling spills
spinach spinal spinals spindlier spindliest spindly spine spineless
spines spinning spins spinster spinsters spirals spire spires spirited
spiriting spiritually spirituals spited spiteful spitefuller
spitefullest spites spiting spittle splash splashed splashes splashing
splat splatter splattered splattering splatters spleen spleens
splendider splendidest splendidly splice spliced splices splicing
splint splinted splinter splintered splintering splinters splinting
splints splurge splurged splurges splurging spokes spokesmen
spokespeople spokesperson spokespersons spokeswoman spokeswomen sponge
sponged sponges spongier spongiest sponging spongy sponsorship
spontaneity spoofed spoofing spoofs spook spooked spookier spookiest
spooking spooks spooky spooled spooling spools spoon spooned spoonful
spoonfuls spooning spoons sporadic spore spores sporran sported
sporting sportsmanship spotless spotlight spotlighted spotlighting
spotlights spottier spottiest spotty spouse spouses spouted spouting
spouts sprain sprained spraining sprains sprangs sprawl sprawled
sprawling sprawls sprayed spraying sprays spreadsheet spreadsheets
spree spreed spreeing sprees sprier spriest sprig sprigs springboard
springboards springier springiest springtime springy sprinkle
sprinkled sprinkler sprinklers sprinkles sprinkling sprinklings sprint
sprinted sprinter sprinters sprinting sprints sprout sprouted
sprouting sprouts spruce spruced sprucer spruces sprucest sprucing
spry spud spuds spun spunk spunked spunking spunks spurn spurned
spurning spurns spurred spurring spurs spurt spurted spurting spurts
sputter sputtered sputtering sputters spying squabble squabbled
squabbles squabbling squadded squadding squadron squadrons squads
squalid squalider squalidest squall squalled squalling squalls squalor
squander squandered squandering squanders squarely squarer squarest
squat squats squatted squatter squattest squatting squawk squawked
squawking squawks squeak squeaked squeakier squeakiest squeaking
squeaks squeaky squeal squealed squealing squeals squeamish squelch
squelched squelches squelching squid squidded squidding squids squint
squinted squinter squintest squinting squints squire squired squires
squiring squirm squirmed squirming squirms squirrel squirrels squirt
squirted squirting squirts stab stabbed stabbing stabled stabler
stables stablest stabling stabs stacked stacking stadium stadiums
staffed staffing staffs stag stagecoach stagecoaches staged staging
stagnant stagnate stagnated stagnates stagnating stagnation stags
staid staider staidest stain stained staining stains staircases
stairway stairways staked stakes staking staled stalemate stalemated
stalemates stalemating staler stales stalest staling stalk stalked
stalking stalks stalled stalling stallion stallions stalls stalwart
stalwarts stamina stammer stammered stammering stammers stampede
stampeded stampedes stampeding stances stanch stanched stancher
stanches stanchest stanching standby standbys standings standoff
standoffs standpoints standstill standstills stank stanks stanza
stanzas staple stapled stapler staplers staples stapling starboard
starch starched starches starchier starchiest starching starchy
stardom starfish starfishes starked starker starkest starking starks
starlight starrier starriest starry startlingly starvation statelier
stateliest stately stater statesman statesmanship statesmen stationed
stationery stationing statistically statistician statisticians statue
statues stature statures statuses statute statutes statutory staunch
staunched stauncher staunches staunchest staunching staunchly stave
staved staving steadfast steadied steadier steadies steadiest
steadying steak steaks stealth stealthier stealthiest stealthily
stealthy steamed steamier steamies steamiest steaming steamroller
steamrollered steamrollering steamrollers steams steamy steeled
steeling steels steeped steeper steepest steeping steeple steeples
steeps stellar stemmed stemming stench stenched stenches stenching
stencil stencils stenographer stenographers stenography stepladder
stepladders stereos stereotyped stereotyping stern sterned sterner
sternest sterning sternly sternness sterns stethoscope stethoscopes
stew steward stewarded stewardess stewardesses stewarding stewards
stewed stewing stews sticker stickers stickied stickier stickies
stickiest stickler sticklers stickying stiffed stiffen stiffened
stiffening stiffens stiffer stiffest stiffing stiffly stiffness stiffs
stifle stifled stifles stifling stigma stigmas stigmata stillborn
stillborns stilled stiller stillest stilling stillness stills stilted
stimulant stimulants stimuli stimulus sting stinger stingers stingier
stingiest stinginess stinging stings stingy stink stinking stinks
stint stinted stinting stints stipulate stipulated stipulates
stipulating stipulation stipulations stirrup stirrups stitch stitched
stitches stitching stockade stockaded stockades stockading stockbroker
stockbrokers stocked stockholder stockholders stockier stockiest
stocking stockings stockpile stockpiled stockpiles stockpiling stocky
stockyard stockyards stodgier stodgiest stodgy stoical stoke stoked
stokes stoking stoles stolid stolider stolidest stolidly stomached
stomaching stomachs stomp stomped stomping stomps stoned stonier
stoniest stoning stony stool stools stoop stooped stooping stoops
stopgap stopgaps stopover stopovers stoppage stoppages stopper
stoppered stoppering stoppers stopwatch stopwatches storehouse
storehouses storekeeper storekeepers storeroom storerooms stork storks
stormed stormier stormiest storming stormy stout stouter stoutest
stove stoves stow stowaway stowaways stowed stowing stows straddle
straddled straddles straddling straggle straggled straggler stragglers
straggles straggling straighted straighten straightened straightening
straightens straighter straightest straightforwardly straightforwards
straighting straights strained strainer strainers straining strait
straited straiting straitjacket straitjacketed straitjacketing
straitjackets straits strand stranded stranding strands strangeness
strangered strangering strangers strangle strangled strangles
strangling strangulation strap strapped strapping straps strata
stratagem stratagems strategics stratified stratifies stratify
stratifying stratosphere stratospheres stratum strawberries strawberry
strawed strawing straws strayed straying strays streak streaked
streaking streaks streamed streamer streamers streaming streamline
streamlined streamlines streamlining streetcar streetcars strengthened
strengthening strengthens strengths strenuous strenuously stressful
stretcher stretchers strew strewed strewing strewn strews stricken
stricter strictest strictness stridden stride strides striding strife
striker strikers strikings stringier stringiest stringing stringy
stripe striped stripes striping stripper striven strives striving
strode stroked strokes stroking stroll strolled stroller strollers
strolling strolls stronghold strongholds strove structuralist strum
strummed strumming strums strung strut struts strutted strutting stub
stubbed stubbier stubbies stubbiest stubbing stubble stubborn
stubborned stubborner stubbornest stubborning stubborns stubby stubs
stud studded studding studentship studios studious studs stuffier
stuffiest stuffy stump stumped stumping stumps stung stunk stunted
stunting stunts stupefied stupefies stupefy stupefying stupendous
stupider stupidest stupidities stupidly stupids stupor stupors
sturdier sturdiest sturdy stutter stuttered stuttering stutters styled
styling stylish stylistic stylus suave suaver suavest sub subbed
subbing subcommittee subcommittees subconscious subconsciously
subdivide subdivided subdivides subdividing subdivision subdivisions
subdue subdued subdues subduing subgroup subjectives subjugate
subjugated subjugates subjugating subjunctive sublet sublets
subletting sublime sublimed sublimer sublimes sublimest subliming
submarine submarines submerge submerged submerges submerging
submersion submissions submissive subnormal subordinate subordinated
subordinates subordinating subprogram subs subscribed subscriber
subscribers subscribes subscribing subscript subscriptions subscripts
subsection subsections subsequents subservient subservients subsets
subside subsided subsides subsidiaries subsidies subsiding subsidy
subsist subsisted subsistence subsisting subsists substandard
substantiate substantiated substantiates substantiating substitutions
subsystem subterfuge subterfuges subterranean subtler subtlest
subtract subtracted subtracting subtraction subtractions subtracts
suburb suburban suburbans suburbs subversive subversives subvert
subverted subverting subverts successes successions successively
successors succinct succincter succinctest succinctly succulent
succulents succumb succumbed succumbing succumbs suck sucked sucker
suckered suckering suckers sucking suckle suckled suckles suckling
sucks suction suctioned suctioning suctions suds suede sufferings
sufficed suffices sufficing suffixed suffixes suffixing suffocate
suffocated suffocates suffocating suffocation suffrage sugared
sugarier sugariest sugaring sugars sugary suggester suggestive
suicides suitcase suitcases suites suitor suitors sulk sulked sulkier
sulkies sulkiest sulking sulks sulky sullen sullener sullenest sultan
sultans sultrier sultriest sultry summarily summered summering summers
summit summits summon summoned summoning summons summonsed summonses
summonsing sumptuous sunbathe sunbathed sunbathes sunbathing sunburn
sunburned sunburning sunburns sundae sundaes sundial sundials sundown
sundowns sundries sunflower sunflowers sunglasses sunken sunks | |
<filename>src/cnstlltn/_cli.py
import ansimarkup
import attrdict
import braceexpand
import click
import fnmatch
import graphviz
import json
import os
import pathlib
import re
import runpy
import shutil
import subprocess
import sys
import tempfile
import threading
import toposort
from . import diffformatter
from .model import Model
from .statestorage import StateStorage
from . import tagexpr
# TODO
# shellcheck
# dot tmpdir
def format_str_list(items):
    """Render *items* as a comma-separated list of JSON-quoted strings."""
    quoted = [json.dumps(item) for item in items]
    return ", ".join(quoted)
def process_modes(model_modes, opt_mode):
    """Resolve final mode values from declared defaults and --mode options.

    Each entry of *opt_mode* is "name=value" (bare "name" means "name=1").
    The resolved values are checked against each mode's 'choices' list and
    'validate_cb' callback; failures raise click.ClickException.
    """
    # start from every mode's declared default
    mode_values = {name: desc['default'] for name, desc in model_modes.items()}
    # overlay command-line overrides
    for mode_str in opt_mode:
        name, sep, value = mode_str.partition("=")
        if not sep:
            value = "1"
        if name not in model_modes:
            raise click.ClickException("undefined mode '{}'".format(name))
        mode_values[name] = value
    # validate the final assignment
    for name, desc in model_modes.items():
        value = mode_values[name]
        choices = desc['choices']
        if choices is not None and value not in choices:
            raise click.ClickException(
                "'{}' is not a valid value for mode '{}'. Valid values are: {}".format(
                    value,
                    name,
                    format_str_list(choices)
                )
            )
        if desc['validate_cb'] is not None:
            try:
                desc['validate_cb'](value, values=mode_values)
            except ValueError as e:
                raise click.ClickException(e)
    return mode_values
def add_modes_to_env(env, used_modes, mode_values):
    """Expose each mode in *used_modes* to scripts as a MODE_<name> variable."""
    env.update(("MODE_" + name, mode_values[name]) for name in used_modes)
def validate_and_finalize_model(model):
    """Cross-check the configured model and compute the resource order.

    Side effects on *model*: attaches ``model.state`` (a StateStorage),
    builds ``model.dependencies`` (name -> set of dependency names),
    assembles each resource's up/down/precheck ``script.sh`` from its
    registered script chunks, tags untagged resources 'untagged', and sets
    ``model.resource_order`` to a topological ordering.

    :raises click.ClickException: on missing statestorage, dangling
        dependencies/imports, undefined modes, or circular dependencies.
    """
    if model.statestorage is None:
        raise click.ClickException("'statestorage' has not been set")
    model.state = StateStorage(backend=model.statestorage)
    model.dependencies = {}
    for res_name, res in model.resources.items():
        # allow script files to be (re)written during finalization
        res.frozen = False
        for bag in ('up', 'down', 'precheck'):
            # script = strict-mode prologue + bag-specific and 'common'
            # chunks, ordered by each chunk's (priority, text) key
            res.file(bag, "script.sh", "\n".join(
                [
                    "set -euo pipefail"
                ] + [
                    i[1] for i in sorted(
                        res.data.script_chunks[bag] + res.data.script_chunks['common'],
                        key=lambda i: i[0]
                    )
                ]
            ))
        dependencies = model.dependencies[res_name] = set()
        for dep_res_name in res.data.depends:
            dependencies.add(dep_res_name)
        # imports imply dependencies and must reference real exports
        for imp_name, (dep_res_name, dep_export_name) in res.data.imports.items():
            dependencies.add(dep_res_name)
            dep_res = model.resources.get(dep_res_name)
            if not dep_res:
                raise click.ClickException(
                    "resource '{}' depends on non-existent resource '{}'".format(
                        res_name,
                        dep_res_name
                    ))
            if dep_export_name not in dep_res.data.exports:
                raise click.ClickException(
                    "resource '{}' imports variable '{}' which is not exported by resource '{}'".format(
                        res_name,
                        dep_export_name,
                        dep_res_name
                    ))
        if not res.data.tags:
            # guarantee every resource is selectable by tag expressions
            res.tags('untagged')
        for used_mode in res.data.used_modes:
            if used_mode not in model.modes:
                raise click.ClickException(
                    "resource '{}' uses undefined mode '{}'".format(
                        res_name,
                        used_mode
                    ))
    try:
        model.resource_order = toposort.toposort_flatten(model.dependencies)
    except toposort.CircularDependencyError as e:
        raise click.ClickException("circular resource dependencies: {}".format(e.data))
def load_py_model(path, workspace):
    """Load a model from a Python file by invoking its configure() hook.

    Returns (model, notify) where notify forwards to the file's optional
    ``notify(model, ...)`` function (a no-op when absent).
    """
    namespace = runpy.run_path(path)
    configure = namespace.get('configure')
    if not callable(configure):
        raise click.ClickException("'configure' function is not defined or is not a callable")
    model = Model(path.parent, workspace)
    configure(model)
    notify = namespace.get('notify', lambda *a: None)
    if not callable(notify):
        raise click.ClickException("'notify' is not a callable")
    return model, lambda *a: notify(model, *a)
def load_model(path, workspace):
    """Dispatch model loading by file extension (only .py is supported)."""
    if path.suffix != '.py':
        raise click.ClickException("don't know how to interpret %s" % path)
    return load_py_model(path, workspace)
def process_aliases(
    existing_resources,
    aliases
):
    """Apply alias renames to a resource-name mapping.

    Returns (renamed_mapping, renames): the mapping with aliased names
    substituted, and the list of (old_name, new_name) pairs applied.
    """
    renames = []
    renamed = {}
    for old_name, res in existing_resources.items():
        target = aliases.get(old_name)
        if target:
            renames.append((old_name, target))
            renamed[target] = res
        else:
            renamed[old_name] = res
    return renamed, renames
def make_graph(
    model,
    current_tags,
    existing_resources,
    existing_dependencies,
    resources_to_up,
    resources_to_down
):
    """Build a graphviz Digraph of resources, dependencies, and tags.

    Visual encoding: green = planned 'up', red = planned 'down'; bold =
    exists in recorded state, dashed = only in recorded state (removed
    from the model); a '*' label suffix marks dirty resources.  Planned
    up/down sequences are drawn as non-constraining colored chains.
    """
    # TODO show renames
    graph = graphviz.Digraph()
    def res_color(res_name):
        # node/edge color encodes the planned operation for this resource
        if res_name in resources_to_up:
            return 'green'
        if res_name in resources_to_down:
            return 'red'
        return 'black'
    # nodes and import edges for resources present in the model
    for res_name, res in sorted(model.resources.items()):
        is_existing = res_name in existing_resources
        is_dirty = is_existing and existing_resources[res_name]['dirty']
        label_suffix = ''
        if is_dirty:
            label_suffix += '*'
        graph.node(
            'res-' + res_name,
            label=res_name + label_suffix,
            color=res_color(res_name),
            style=['solid', 'bold'][is_existing],
            group=['new', 'existing'][is_existing]
        )
        # group imports per dependency so each pair gets one labelled edge
        dependencies = {}
        for imp_name, (dep_res_name, dep_export_name) in res.data.imports.items():
            dependencies.setdefault(dep_res_name, []).append(dep_export_name)
        for dep_res_name, imports in sorted(dependencies.items()):
            graph.edge(
                'res-' + dep_res_name,
                'res-' + res_name,
                label=",\n".join(sorted(imports))
            )
    # resources that only exist in recorded state (removed from the model)
    for res_name, _ in sorted(existing_resources.items()):
        if res_name in model.resources:
            continue
        graph.node(
            'res-' + res_name,
            label=res_name,
            color=res_color(res_name),
            style='dashed',
            group='old'
        )
        for dep_res_name in sorted(existing_dependencies[res_name]):
            graph.edge(
                'res-' + dep_res_name,
                'res-' + res_name
            )
    # dashed, arrowless tag-to-resource links; collect tags for the cluster
    all_tags = set()
    for res_name, tags in current_tags.items():
        for tag in tags:
            all_tags.add(tag)
            graph.edge(
                'tag-' + tag,
                'res-' + res_name,
                style='dashed',
                arrowhead='none'
            )
    # invisible cluster keeps the tag nodes grouped together in the layout
    with graph.subgraph(name='cluster_tags', graph_attr=dict(style='invis')) as subgraph:
        for tag in all_tags:
            subgraph.node(
                'tag-' + tag,
                label=tag,
                shape='rectangle',
                fillcolor='yellow',
                style='filled'
            )
    # draw planned execution order as colored chains that don't affect rank
    for seq, seq_style in (
        (resources_to_down, dict(color='red')),
        (resources_to_up, dict(color='green'))
    ):
        for i, j in zip(seq[:-1], seq[1:]):
            graph.edge(
                'res-' + i,
                'res-' + j,
                constraint='false',
                **seq_style
            )
    return graph
def run_script(*, kind, res_dir, res_name, debug, env, confirm_bail=False):
    """Run ``script.sh`` in *res_dir* under bash with *env* overlayed.

    Entries of *env* with value None are removed from the inherited
    environment.  On a nonzero exit status either ask the user whether to
    continue (confirm_bail) or raise click.ClickException.
    """
    merged_env = dict(os.environ)
    for name, value in env.items():
        if value is None:
            merged_env.pop(name, None)
        else:
            merged_env[name] = value
    # TODO signal handling per
    # https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
    argv = ["/bin/bash"]
    if debug:
        argv.append("-x")
    argv.append("script.sh")
    completed = subprocess.run(argv, cwd=res_dir, env=merged_env)
    if completed.returncode == 0:
        return
    error_message = "{} script for resource '{}' has failed with exit status {}".format(
        kind,
        res_name,
        completed.returncode
    )
    if confirm_bail:
        if click.confirm(
            "{}. Ignore and continue? Note: the resource will be permanently forgotten "
            "and probably left in an inconsistent state requiring manual intervention".format(error_message)
        ):
            return
    raise click.ClickException(error_message)
def write_files(bag, dest_dir):
    """Write each (relative-path -> text) entry of *bag* under *dest_dir*.

    Intermediate directories are created as needed.
    """
    for rel_name, text in bag.items():
        target = dest_dir / rel_name
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(text)
def read_files(path, *, cb=lambda _: None, dest=None):
    """Read every regular file directly under *path* into a name->text dict.

    Results go into *dest* when given (and it is returned), otherwise into
    a fresh dict.  *cb* is invoked with each file's path after reading.
    Any non-file directory entry raises click.ClickException.
    """
    result = {} if dest is None else dest
    for entry in path.iterdir():
        if not entry.is_file():
            raise click.ClickException("don't know how to deal with '{}'".format(entry.absolute()))
        result[entry.name] = entry.read_text()
        cb(entry)
    return result
def write_mementos(dest, state):
    """Materialize per-resource memento files from *state* under *dest*.

    The destination is emptied (or created) first.  A resource whose
    'dirty' flag is set -- the default when absent -- gets a ".dirty"
    suffix on its directory.  'mementos_modes' maps mode strings (any
    base accepted by int(..., base=0)) to the memento names to chmod.
    """
    if dest.exists():
        wipe_dir(dest)
    else:
        dest.mkdir()
    for res_name, res in state['resources'].items():
        suffix = ".dirty" if res.get('dirty', True) else ""
        res_dir = dest / (res_name + suffix)
        res_dir.mkdir()
        for memento_name, memento_data in res.get('mementos', {}).items():
            (res_dir / memento_name).write_text(memento_data)
        for mode_str, mementos_names in res.get('mementos_modes', {}).items():
            mode = int(mode_str, base=0)
            for memento_name in mementos_names:
                (res_dir / memento_name).chmod(mode)
def add_dicts(a, b):
    """Return a new dict combining *a* and *b*; *b* wins on key clashes."""
    return {**a, **b}
def wipe_dir(d):
    """Remove everything inside directory *d* without removing *d* itself."""
    for entry in d.iterdir():
        # subtrees need a recursive delete; plain entries are unlinked
        if entry.is_dir():
            shutil.rmtree(entry)
            continue
        entry.unlink()
def show_dict_diff(old, new):
    """Echo a per-key diff between two name->text dicts, framed by cutlines.

    Prints nothing when the dicts produce no diff output.
    """
    diffs = []
    for name in sorted(set(old) | set(new)):
        diffs.extend(diffformatter.format_diff(
            old.get(name, ''),
            new.get(name, ''),
            header=["modified: {}".format(name)]
        ))
    if not diffs:
        return
    separator = "." * 80
    click.echo(separator)
    click.echo("".join(diffs), nl=False)
    click.echo(separator)
def names_to_re(names):
    """Compile shell-style name patterns (with brace expansion) to one regex.

    Returns None when *names* is empty or falsy, meaning "match nothing
    was requested" rather than "match nothing".
    """
    if not names:
        return None
    translated = [
        fnmatch.translate(expanded)
        for pattern in names
        for expanded in braceexpand.braceexpand(pattern)
    ]
    return re.compile('|'.join(translated))
def make_tags_matcher(exprs):
    """Build a tag-set predicate from tag expressions; None when none given.

    The predicate is true when any compiled expression matches the tags.
    """
    if not exprs:
        return None
    compiled = [tagexpr.compile(expr) for expr in exprs]
    return lambda tags: any(matcher(tags) for matcher in compiled)
def up_resource(
*,
debug,
step,
full,
messages,
model,
res_dir,
resource,
resources_vars,
state,
ignore_identity_change,
ignore_checkpoints,
ignore_precheck,
mode_values,
):
res_dir.mkdir()
exports_dir = res_dir / "exports"
exports_dir.mkdir()
mementos_dir = res_dir / "mementos"
mementos_dir.mkdir()
istate_dir = res_dir / "state"
imports = dict(
(import_name, resources_vars[resource_name][export_name])
for import_name, (resource_name, export_name) in resource.data.imports.items()
)
imports.update(resource.data.const)
new_files = dict(
(
bag_name,
dict(
('/'.join(fname), render_f(imports))
for fname, render_f in bag.items()
)
)
for bag_name, bag in resource.data.files.items()
)
new_up_and_common = add_dicts(new_files['common'], new_files['up'])
write_files(new_up_and_common, res_dir)
new_deps = sorted(model.dependencies[resource.name])
new_tags = sorted(resource.data.tags)
is_new_resource = resource.name not in state['resources']
resource_state = state['resources'].setdefault(resource.name, {})
istate = resource_state.get('state')
if istate is not None:
istate_dir.mkdir()
write_files(istate, istate_dir)
dirty = resource_state.get('dirty', True)
old_files = resource_state.get('files', {})
old_up_and_common = add_dicts(old_files.get('common', {}), old_files.get('up', {}))
old_deps = resource_state.get('deps')
old_tags = resource_state.get('tags', [])
resource_vars = resources_vars[resource.name] = {}
resource_mementos_modes = {}
def set_new_resource_state():
resource_state['files'] = new_files
resource_state['deps'] = new_deps
resource_state['tags'] = new_tags
def check_products():
for x_kind, x_set, x_var in [
("variable", resource.data.exports, resource_vars),
("memento", resource.data.mementos, resource_mementos)
]:
for x_name in x_set:
if x_name not in x_var:
raise click.ClickException("resource '{}' did not export '{}' {}".format(
resource.name,
x_name,
x_kind
))
unexpected = set(x_var) - x_set
if unexpected:
raise click.ClickException("resource '{}' exported unexpected {}(s): {}".format(
resource.name,
x_kind,
', '.join(sorted(unexpected))
))
if full or dirty or resource.data.always_refresh or new_up_and_common != old_up_and_common:
click.echo("Bringing up resource '{}'".format(resource.name))
if debug and not is_new_resource:
show_dict_diff(old_up_and_common, new_up_and_common)
if step:
click.confirm("Proceed?", abort=True, default=True)
resource_state['dirty'] = True
resource_state.pop('exports', None)
if not is_new_resource and not ignore_identity_change:
old_identity = old_up_and_common.get("identity")
new_identity = new_up_and_common.get("identity")
if old_identity != new_identity:
def format_id(s):
if s is None:
return "(unset)"
else:
return "'{}'".format(s)
click.echo(
"Identity of resource '{}' has changed from {} to {}. "
"Will down the old resource before bringing up a new one.".format(
resource.name,
format_id(old_identity),
format_id(new_identity)
)
)
res_dir_down = res_dir.with_suffix(".down")
res_dir_down.mkdir()
for bag in ('common', 'down'):
write_files(old_files.get(bag, {}), res_dir_down)
state.write()
env = {}
add_modes_to_env(env, resource_state.get('used_modes', []), mode_values)
run_script(
kind='down',
res_dir=res_dir_down,
res_name=resource.name,
debug=debug,
confirm_bail=True,
env=env
)
env = {}
add_modes_to_env(env, resource.data.used_modes, mode_values)
if is_new_resource and not ignore_precheck and new_files['precheck']['script.sh']:
res_dir_precheck = res_dir.with_suffix(".precheck")
res_dir_precheck.mkdir()
for bag in ('common', 'precheck'):
write_files(new_files.get(bag, {}), res_dir_precheck)
run_script(
kind='precheck',
res_dir=res_dir_precheck,
res_name=resource.name,
debug=debug,
env=env
)
last_checkpoint = resource_state.pop('checkpoint', None)
if last_checkpoint is not None and not ignore_checkpoints:
res_dir.joinpath("last-checkpoint").write_text(last_checkpoint)
set_new_resource_state()
resource_state['used_modes'] = list(resource.data.used_modes)
state.write()
checkpoint_fifo = res_dir.joinpath("checkpoint")
os.mkfifo(checkpoint_fifo)
def checkpoint_thread_func():
with checkpoint_fifo.open() as f:
for line in f:
line = line.rstrip("\n")
resource_state['checkpoint'] = line
state.write()
checkpoint_thread = threading.Thread(target=checkpoint_thread_func)
checkpoint_thread.start()
with checkpoint_fifo.open("w"):
run_script(
kind='up',
res_dir=res_dir,
res_name=resource.name,
debug=debug,
env=env
)
checkpoint_thread.join()
read_files(exports_dir, dest=resource_vars)
if istate_dir.is_dir():
istate = read_files(istate_dir)
else:
istate | |
self._get_sensorgroup_actions_critical_list()
actions_critical_choices_tuple_list = []
if not actions_critical_choices_list:
ac = SENSORS_AC_NOACTIONSCONFIGURABLE
dv = self._get_display_value(
self.ACTIONS_DISPLAY_CHOICES, ac)
actions_critical_choices_tuple_list.append((ac, dv))
else:
actions_critical_choices_tuple_set = set()
ac = SENSORS_AC_IGNORE
dv = self._get_display_value(
self.ACTIONS_DISPLAY_CHOICES, ac)
actions_critical_choices_tuple_set.add((ac, dv))
for ac in actions_critical_choices_list:
dv = self._get_display_value(
self.ACTIONS_DISPLAY_CHOICES, ac)
if not dv:
dv = ac
actions_critical_choices_tuple_set.add((ac, dv))
actions_critical_choices_tuple_list = \
list(actions_critical_choices_tuple_set)
LOG.debug("actions_critical_choices_tuple_list=%s",
actions_critical_choices_tuple_list)
return actions_critical_choices_tuple_list
def _get_sensorgroup_actions_major_list(self):
    """Return configurable major-alarm actions as a list (empty when unset)."""
    if not self.actions_major_choices:
        return []
    return self.actions_major_choices.split(",")
@property
def sensorgroup_actions_major_choices(self):
    """(value, display) tuple for the configured major-alarm action."""
    display = self._get_display_value(
        self.ACTIONS_DISPLAY_CHOICES,
        self.actions_major_choices)
    return (self.actions_major_choices, display)
@property
def sensorgroup_actions_major_choices_tuple_list(self):
    """(action, display) tuples selectable as the major-alarm action.

    With no configurable actions, a single 'not configurable' entry is
    returned; otherwise the configurable actions plus an 'ignore' entry,
    deduplicated via a set (order is therefore unspecified).
    """
    actions = self._get_sensorgroup_actions_major_list()
    if not actions:
        ac = SENSORS_AC_NOACTIONSCONFIGURABLE
        dv = self._get_display_value(self.ACTIONS_DISPLAY_CHOICES, ac)
        tuple_list = [(ac, dv)]
    else:
        choice_set = set()
        ignore_dv = self._get_display_value(
            self.ACTIONS_DISPLAY_CHOICES, SENSORS_AC_IGNORE)
        choice_set.add((SENSORS_AC_IGNORE, ignore_dv))
        for ac in actions:
            # fall back to the raw action name when no display text exists
            dv = self._get_display_value(self.ACTIONS_DISPLAY_CHOICES, ac)
            choice_set.add((ac, dv if dv else ac))
        tuple_list = list(choice_set)
    LOG.debug("actions_major_choices_tuple_list=%s", tuple_list)
    return tuple_list
def _get_sensorgroup_actions_minor_list(self):
    """Return configurable minor-alarm actions as a list (empty when unset)."""
    if not self.actions_minor_choices:
        return []
    return self.actions_minor_choices.split(",")
@property
def sensorgroup_actions_minor_choices(self):
    """(value, display) tuple for the configured minor-alarm action."""
    display = self._get_display_value(
        self.ACTIONS_DISPLAY_CHOICES,
        self.actions_minor_choices)
    return (self.actions_minor_choices, display)
@property
def sensorgroup_actions_minor_choices_tuple_list(self):
    """(action, display) tuples selectable as the minor-alarm action.

    With no configurable actions, a single 'not configurable' entry is
    returned; otherwise the configurable actions plus an 'ignore' entry,
    deduplicated via a set (order is therefore unspecified).
    """
    actions = self._get_sensorgroup_actions_minor_list()
    if not actions:
        ac = SENSORS_AC_NOACTIONSCONFIGURABLE
        dv = self._get_display_value(self.ACTIONS_DISPLAY_CHOICES, ac)
        tuple_list = [(ac, dv)]
    else:
        choice_set = set()
        ignore_dv = self._get_display_value(
            self.ACTIONS_DISPLAY_CHOICES, SENSORS_AC_IGNORE)
        choice_set.add((SENSORS_AC_IGNORE, ignore_dv))
        for ac in actions:
            # fall back to the raw action name when no display text exists
            dv = self._get_display_value(self.ACTIONS_DISPLAY_CHOICES, ac)
            choice_set.add((ac, dv if dv else ac))
        tuple_list = list(choice_set)
    LOG.debug("actions_minor_choices_tuple_list=%s", tuple_list)
    return tuple_list
def host_sensorgroup_list(request, host_id):
    """Return all sensor groups of *host_id* wrapped as SensorGroup objects."""
    groups = cgtsclient(request).isensorgroup.list(host_id)
    return [SensorGroup(group) for group in groups]
def host_sensorgroup_get(request, isensorgroup_id):
    """Fetch one sensor group by id.

    :raises ValueError: when no sensor group matches *isensorgroup_id*.
    """
    sensorgroup = cgtsclient(request).isensorgroup.get(isensorgroup_id)
    if sensorgroup:
        return SensorGroup(sensorgroup)
    raise ValueError('No match found for sensorgroup_id "%s".' %
                     isensorgroup_id)
def host_sensorgroup_create(request, **kwargs):
    """Create a sensor group from keyword fields and wrap the result."""
    created = cgtsclient(request).isensorgroup.create(**kwargs)
    return SensorGroup(created)
def host_sensorgroup_update(request, sensorgroup_id, **kwargs):
    """Patch a sensor group: each kwarg becomes a JSON-patch 'replace' op."""
    LOG.debug("sensorgroup_update(): sensorgroup_id=%s, kwargs=%s",
              sensorgroup_id, kwargs)
    patch = [dict(path='/' + key, value=value, op='replace')
             for key, value in kwargs.items()]
    return cgtsclient(request).isensorgroup.update(sensorgroup_id, patch)
def host_sensorgroup_delete(request, isensorgroup_id):
    """Delete the sensor group identified by *isensorgroup_id*."""
    return cgtsclient(request).isensorgroup.delete(isensorgroup_id)
def host_sensorgroup_relearn(request, host_uuid):
    """Ask the host identified by *host_uuid* to rediscover its sensor model."""
    LOG.info("relearn sensor model for host %s", host_uuid)
    return cgtsclient(request).isensorgroup.relearn(host_uuid)
def host_sensorgroup_suppress(request, sensorgroup_id):
    """Suppress the sensor group (string "True" per the API's convention)."""
    return host_sensorgroup_update(request, sensorgroup_id, suppress="True")
def host_sensorgroup_unsuppress(request, sensorgroup_id):
    """Unsuppress the sensor group (string "False" per the API's convention)."""
    return host_sensorgroup_update(request, sensorgroup_id, suppress="False")
class Host(base.APIResourceWrapper):
    """Wrapper for Inventory Hosts.

    ``__init__`` snapshots the raw API values into ``_``-prefixed
    attributes; the same-named properties below then return
    display-friendly (localized) versions via ``_get_display_value``.

    NOTE(review): reading ``self.<attr>`` inside ``__init__`` resolves
    through the property, whose AttributeError (backing field not yet
    set) appears to fall back to the base wrapper's lookup of the raw
    API value — confirm against base.APIResourceWrapper.
    """

    # Attributes mirrored from the REST resource.
    _attrs = ['id', 'uuid', 'hostname', 'personality',
              'subfunctions', 'subfunction_oper', 'subfunction_avail',
              'location', 'serialid', 'operational', 'administrative',
              'invprovision', 'peers',
              'availability', 'uptime', 'task', 'capabilities',
              'created_at', 'updated_at', 'mgmt_mac', 'mgmt_ip',
              'bm_ip', 'bm_type', 'bm_username',
              'config_status', 'vim_progress_status', 'patch_current',
              'requires_reboot', 'boot_device', 'rootfs_device',
              'install_output', 'console', 'ttys_dcd', 'patch_state',
              'allow_insvc_patching', 'install_state', 'install_state_info',
              'clock_synchronization']

    # (raw value, localized display label) lookup tables used by
    # _get_display_value below.
    PERSONALITY_DISPLAY_CHOICES = (
        (PERSONALITY_CONTROLLER, _("Controller")),
        (PERSONALITY_WORKER, _("Worker")),
        (PERSONALITY_NETWORK, _("Network")),
        (PERSONALITY_STORAGE, _("Storage")),
    )
    ADMIN_DISPLAY_CHOICES = (
        ('locked', _("Locked")),
        ('unlocked', _("Unlocked")),
    )
    OPER_DISPLAY_CHOICES = (
        ('disabled', _("Disabled")),
        ('enabled', _("Enabled")),
    )
    AVAIL_DISPLAY_CHOICES = (
        ('available', _("Available")),
        ('intest', _("In-Test")),
        ('degraded', _("Degraded")),
        ('failed', _("Failed")),
        ('power-off', _("Powered-Off")),
        ('offline', _("Offline")),
        ('online', _("Online")),
        ('offduty', _("Offduty")),
        ('dependency', _("Dependency")),
    )
    CONFIG_STATUS_DISPLAY_CHOICES = (
        ('up_to_date', _("up-to-date")),
        ('out_of_date', _("out-of-date")),
    )
    PATCH_STATE_DISPLAY_CHOICES = (
        (patch_constants.PATCH_AGENT_STATE_IDLE,
         _("Idle")),
        (patch_constants.PATCH_AGENT_STATE_INSTALLING,
         _("Patch Installing")),
        (patch_constants.PATCH_AGENT_STATE_INSTALL_FAILED,
         _("Patch Install Failed")),
        (patch_constants.PATCH_AGENT_STATE_INSTALL_REJECTED,
         _("Patch Install Rejected")),
    )
    INSTALL_STATE_DISPLAY_CHOICES = (
        (constants.INSTALL_STATE_PRE_INSTALL, _("Pre-install")),
        (constants.INSTALL_STATE_INSTALLING, _("Installing Packages")),
        (constants.INSTALL_STATE_POST_INSTALL, _("Post-install")),
        (constants.INSTALL_STATE_FAILED, _("Install Failed")),
        (constants.INSTALL_STATE_INSTALLED, _("Installed")),
        (constants.INSTALL_STATE_BOOTING, _("Booting")),
        (constants.INSTALL_STATE_COMPLETED, _("Completed")),
    )

    def __init__(self, apiresource):
        super(Host, self).__init__(apiresource)
        # Snapshot the raw API values that back the display properties.
        self._personality = self.personality
        self._subfunctions = self.subfunctions
        self._subfunction_oper = self.subfunction_oper
        self._subfunction_avail = self.subfunction_avail
        self._location = self.location
        self._peers = self.peers
        self._bm_type = self.bm_type
        self._administrative = self.administrative
        self._invprovision = self.invprovision
        self._operational = self.operational
        self._availability = self.availability
        self._capabilities = self.capabilities
        self._ttys_dcd = self.ttys_dcd
        # Patch status is filled in separately; start with placeholders.
        self.patch_current = "N/A"
        self.requires_reboot = "N/A"
        self.allow_insvc_patching = True
        self._patch_state = patch_constants.PATCH_AGENT_STATE_IDLE
        # BUG FIX: this was assigned to self._clock_synchronizations
        # (plural), so the clock_synchronization property never found its
        # backing field and the display mapping was silently skipped.
        self._clock_synchronization = self.clock_synchronization
        self._install_state = self.install_state
        # Strip '+' progress markers from either end of the state string.
        if self._install_state is not None:
            self._install_state = self._install_state.strip("+")

    @property
    def personality(self):
        # Override controller personality to retrieve
        # the current activity state which
        # is reported in the hosts location field
        if (self._personality == PERSONALITY_CONTROLLER):
            if (self._capabilities['Personality'] == 'Controller-Active'):
                return _('Controller-Active')
            else:
                return _('Controller-Standby')
        return self._get_display_value(self.PERSONALITY_DISPLAY_CHOICES,
                                       self._personality)

    @property
    def additional_subfunctions(self):
        """True when the host carries more than one subfunction."""
        return len(self._subfunctions.split(',')) > 1

    @property
    def is_cpe(self):
        """True for combined controller+worker (all-in-one) hosts."""
        subfunctions = self._subfunctions.split(',')
        if PERSONALITY_CONTROLLER in subfunctions and \
                PERSONALITY_WORKER in subfunctions:
            return True
        else:
            return False

    @property
    def subfunctions(self):
        """Raw comma-separated subfunction string split into a list."""
        return self._subfunctions.split(',')

    @property
    def subfunction_oper(self):
        return self._get_display_value(self.OPER_DISPLAY_CHOICES,
                                       self._subfunction_oper)

    @property
    def subfunction_avail(self):
        return self._get_display_value(self.AVAIL_DISPLAY_CHOICES,
                                       self._subfunction_avail)

    @property
    def config_required(self):
        return self.config_status == 'config required'

    @property
    def location(self):
        # The API may return either an object with a 'locn' attribute or
        # a mapping with a 'locn' key; anything else displays as ''.
        if hasattr(self._location, 'locn'):
            return self._location.locn
        if 'locn' in self._location:
            return self._location['locn']
        else:
            return ''

    @property
    def peers(self):
        # Same object-or-mapping duality as 'location' above.
        if hasattr(self._peers, 'name'):
            return self._peers.name
        if self._peers and 'name' in self._peers:
            return self._peers['name']
        else:
            return ''

    @property
    def boottime(self):
        """Approximate boot time derived from the reported uptime."""
        return timezone.now() - datetime.timedelta(
            seconds=self.uptime)

    @property
    def administrative(self):
        return self._get_display_value(self.ADMIN_DISPLAY_CHOICES,
                                       self._administrative)

    @property
    def operational(self):
        return self._get_display_value(self.OPER_DISPLAY_CHOICES,
                                       self._operational)

    @property
    def availability(self):
        return self._get_display_value(self.AVAIL_DISPLAY_CHOICES,
                                       self._availability)

    @property
    def bm_type(self):
        bm_type = self._bm_type
        return bm_type

    @property
    def ttys_dcd(self):
        """Raw API value is a string; expose it as a real boolean."""
        return self._ttys_dcd == 'True'

    @property
    def clock_synchronization(self):
        # CLOCK_SYNCHRONIZATION_CHOICES is a module-level table.
        return self._get_display_value(CLOCK_SYNCHRONIZATION_CHOICES,
                                       self._clock_synchronization)

    @property
    def patch_state(self):
        return self._get_display_value(self.PATCH_STATE_DISPLAY_CHOICES,
                                       self._patch_state)

    @property
    def install_state(self):
        return self._get_display_value(self.INSTALL_STATE_DISPLAY_CHOICES,
                                       self._install_state)

    def _get_display_value(self, display_choices, data):
        """Lookup the display value in the provided dictionary.

        Case-insensitive match of *data* against the first element of
        each (value, display) pair; returns None when nothing matches.
        """
        display_value = [display for (value, display) in display_choices
                         if value.lower() == (data or '').lower()]
        if display_value:
            return display_value[0]
        return None
def system_list(request):
    """Return all systems wrapped in System objects."""
    return [System(item) for item in cgtsclient(request).isystem.list()]
def system_get(request):
    """Return the (single) system as a System wrapper.

    Raises ValueError when no system exists.  BUG FIX: the original
    indexed ``[0]`` before checking, so an empty list raised IndexError
    and the 'No system found.' guard was unreachable; check the list
    first instead.
    """
    systems = cgtsclient(request).isystem.list()
    if not systems:
        raise ValueError('No system found.')
    return System(systems[0])
def system_update(request, system_id, **kwargs):
    """Apply kwargs to the system as JSON-patch 'replace' operations."""
    LOG.debug("system_update(): system_id=%s, kwargs=%s", system_id, kwargs)
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).isystem.update(system_id, patch)
def host_create(request, **kwargs):
    """Create a host from keyword arguments; return the Host wrapper."""
    LOG.debug("host_create(): kwargs=%s", kwargs)
    return Host(cgtsclient(request).ihost.create(**kwargs))
def host_update(request, host_id, **kwargs):
    """Apply kwargs to a host as JSON-patch 'replace' operations."""
    LOG.debug("host_update(): host_id=%s, kwargs=%s", host_id, kwargs)
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).ihost.update(host_id, patch)
def host_delete(request, host_id):
    """Delete the host identified by host_id."""
    LOG.debug("host_delete(): host_id=%s", host_id)
    return cgtsclient(request).ihost.delete(host_id)
def host_lock(request, host_id):
    """Request an administrative lock of the host."""
    return host_update(request, host_id, action='lock')
def host_force_lock(request, host_id):
    """Force an administrative lock of the host."""
    return host_update(request, host_id, action='force-lock')
def host_unlock(request, host_id):
    """Request an administrative unlock of the host."""
    return host_update(request, host_id, action='unlock')
def host_force_unlock(request, host_id):
    """Force an administrative unlock of the host."""
    return host_update(request, host_id, action='force-unlock')
def host_reboot(request, host_id):
    """Request a reboot of the host."""
    return host_update(request, host_id, action='reboot')
def host_reset(request, host_id):
    """Request a reset of the host."""
    return host_update(request, host_id, action='reset')
def host_reinstall(request, host_id):
    """Request a reinstall of the host."""
    return host_update(request, host_id, action='reinstall')
def host_power_on(request, host_id):
    """Request a power-on of the host."""
    return host_update(request, host_id, action='power-on')
def host_power_off(request, host_id):
    """Request a power-off of the host."""
    return host_update(request, host_id, action='power-off')
def host_swact(request, host_id):
    """Request a controller services switchover (swact) on the host."""
    return host_update(request, host_id, action='swact')
def host_get(request, host_id):
    """Fetch a single host by id; raise ValueError when nothing matches."""
    result = cgtsclient(request).ihost.get(host_id)
    if result:
        return Host(result)
    raise ValueError('No match found for host_id "%s".' % host_id)
def host_list(request):
    """Return all hosts wrapped in Host objects."""
    return [Host(item) for item in cgtsclient(request).ihost.list()]
class DNS(base.APIResourceWrapper):
    """Wrapper exposing the DNS configuration resource's attributes."""

    _attrs = ['isystem_uuid', 'nameservers', 'uuid', 'link']

    def __init__(self, apiresource):
        super(DNS, self).__init__(apiresource)
def dns_update(request, dns_id, **kwargs):
    """Patch the DNS configuration.

    An empty 'nameservers' value is transmitted as the sentinel 'NC'
    ("no change"/clear marker expected by the backend).
    """
    LOG.debug("dns_update(): dns_id=%s, kwargs=%s", dns_id, kwargs)
    patch = []
    for field, new_value in kwargs.items():
        if field == 'nameservers' and not new_value:
            new_value = 'NC'
        patch.append({'path': '/' + field, 'value': new_value, 'op': 'replace'})
    return cgtsclient(request).idns.update(dns_id, patch)
def dns_delete(request, dns_id):
    """Delete the DNS configuration identified by dns_id."""
    LOG.debug("dns_delete(): dns_id=%s", dns_id)
    return cgtsclient(request).idns.delete(dns_id)
def dns_get(request, dns_id):
    """Fetch a DNS configuration by id; raise ValueError when missing."""
    result = cgtsclient(request).idns.get(dns_id)
    if result:
        return DNS(result)
    raise ValueError('No match found for dns_id "%s".' % dns_id)
def dns_list(request):
    """Return all DNS configurations wrapped in DNS objects."""
    return [DNS(item) for item in cgtsclient(request).idns.list()]
class NTP(base.APIResourceWrapper):
    """Wrapper exposing the NTP configuration resource's attributes."""

    _attrs = ['isystem_uuid', 'ntpservers', 'uuid', 'link']

    def __init__(self, apiresource):
        super(NTP, self).__init__(apiresource)
def ntp_update(request, ntp_id, **kwargs):
    """Patch the NTP configuration.

    An empty 'ntpservers' value is transmitted as the sentinel 'NC',
    mirroring dns_update().
    """
    LOG.debug("ntp_update(): ntp_id=%s, kwargs=%s", ntp_id, kwargs)
    patch = []
    for field, new_value in kwargs.items():
        if field == 'ntpservers' and not new_value:
            new_value = 'NC'
        patch.append({'path': '/' + field, 'value': new_value, 'op': 'replace'})
    return cgtsclient(request).intp.update(ntp_id, patch)
def ntp_delete(request, ntp_id):
    """Delete the NTP configuration identified by ntp_id."""
    LOG.debug("ntp_delete(): ntp_id=%s", ntp_id)
    return cgtsclient(request).intp.delete(ntp_id)
def ntp_get(request, ntp_id):
    """Fetch an NTP configuration by id; raise ValueError when missing."""
    result = cgtsclient(request).intp.get(ntp_id)
    if result:
        return NTP(result)
    raise ValueError('No match found for ntp_id "%s".' % ntp_id)
def ntp_list(request):
    """Return all NTP configurations wrapped in NTP objects."""
    return [NTP(item) for item in cgtsclient(request).intp.list()]
class PTP(base.APIResourceWrapper):
    """Wrapper exposing the PTP configuration resource's attributes."""

    _attrs = ['isystem_uuid', 'mode',
              'transport', 'mechanism', 'uuid', 'link']

    def __init__(self, apiresource):
        super(PTP, self).__init__(apiresource)
def ptp_update(request, ptp_id, **kwargs):
    """Apply kwargs to the PTP configuration as JSON-patch 'replace' ops."""
    LOG.debug("ptp_update(): ptp_id=%s, kwargs=%s", ptp_id, kwargs)
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).ptp.update(ptp_id, patch)
def ptp_delete(request, ptp_id):
    """Delete the PTP configuration identified by ptp_id."""
    LOG.debug("ptp_delete(): ptp_id=%s", ptp_id)
    return cgtsclient(request).ptp.delete(ptp_id)
def ptp_get(request, ptp_id):
    """Fetch a PTP configuration by id; raise ValueError when missing."""
    result = cgtsclient(request).ptp.get(ptp_id)
    if result:
        return PTP(result)
    raise ValueError('No match found for ptp_id "%s".' % ptp_id)
def ptp_list(request):
    """Return all PTP configurations wrapped in PTP objects."""
    return [PTP(item) for item in cgtsclient(request).ptp.list()]
class EXTOAM(base.APIResourceWrapper):
"""..."""
_attrs = ['isystem_uuid', 'oam_subnet', 'oam_gateway_ip',
'oam_floating_ip', 'oam_c0_ip', 'oam_c1_ip',
'oam_start_ip', 'oam_end_ip',
'uuid', 'link', 'region_config']
def __init__(self, apiresource):
super(EXTOAM, self).__init__(apiresource)
if hasattr(self, 'uuid'):
self._oam_subnet = self.oam_subnet
self._oam_gateway_ip = self.oam_gateway_ip
self._oam_floating_ip | |
<gh_stars>0
import sys
import time
MAX_INT = 2 ** 31 - 1  # "infinity" placeholder for not-yet-reached cells in BFS()
def create_weights_matrix(matrix):
    """Translate a 0/1 maze into traversal weights.

    The original matrix contains 0 for an empty cell and 1 for a walled
    one; they become positive weights 1 and 1000 respectively, so that
    shortest paths computed on the weight sums always prefer empty cells
    (foo-bar mazes are at most 20x20, so the empty-cell portion of any
    path stays below 1000).
    """
    empty_cell_weight = 1
    walled_cell_weight = 1000
    return [[empty_cell_weight if cell == 0 else walled_cell_weight
             for cell in row]
            for row in matrix]
def find_adjacents_of_a_cell(x, y, number_columns, number_rows):
    """Return the in-bounds orthogonal neighbours of cell (x, y).

    Neighbours are listed in up, right, down, left order, which matches
    the branch ordering of the original nine-way if chain for every
    in-range cell.

    BUG FIX: the original fell through its if chain and implicitly
    returned None for coordinates that were negative, and produced
    out-of-bounds "neighbours" on degenerate (1-wide / 1-tall) grids;
    now every out-of-range input yields [] and all results are clamped
    to the grid.
    """
    if not (0 <= x < number_rows and 0 <= y < number_columns):
        return []
    candidates = ((x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1))
    return [(i, j) for i, j in candidates
            if 0 <= i < number_rows and 0 <= j < number_columns]
def BFS(matrix):
    """Label-correcting shortest-path sweep from the top-left cell.

    Returns a matrix whose cell (i, j) holds the cheapest weighted path
    cost from (0, 0) to (i, j), using create_weights_matrix() weights
    (empty=1, wall=1000); the start cell costs 1 and unreachable cells
    stay at MAX_INT.  The final distances are the fixed point of edge
    relaxation and do not depend on processing order.

    PERFORMANCE FIX: the original popped with list.pop(0) (O(n)) and
    rebuilt set(queue) inside the inner neighbour loop (quadratic), and
    could enqueue duplicates; use a head index plus a membership set
    kept in sync with the queue instead.  Results are unchanged.
    """
    rows = len(matrix)
    cols = len(matrix[0])
    weights = create_weights_matrix(matrix)
    # keeps track of the cells that have been processed at least once
    visited_matrix = [[0] * cols for _ in range(rows)]
    # accumulated best-known path costs from the top-left corner
    paths = [[MAX_INT] * cols for _ in range(rows)]
    # top left corner is always 1; this also seeds the procedure
    paths[0][0] = 1
    queue = [(0, 0)]
    enqueued = {(0, 0)}  # O(1) mirror of the queue's current contents
    head = 0             # index-based pop instead of list.pop(0)
    while head < len(queue):
        node = queue[head]
        head += 1
        enqueued.discard(node)
        node_x, node_y = node
        visited_matrix[node_x][node_y] = 1
        for adj in find_adjacents_of_a_cell(node_x, node_y, cols, rows):
            adj_x, adj_y = adj
            new_path_length = paths[node_x][node_y] + weights[adj_x][adj_y]
            improved = new_path_length < paths[adj_x][adj_y]
            if improved:
                paths[adj_x][adj_y] = new_path_length
            # Re-examine a cell when it is unvisited or was just improved,
            # but never queue the same cell twice at once.
            if adj not in enqueued and (visited_matrix[adj_x][adj_y] != 1
                                        or improved):
                queue.append(adj)
                enqueued.add(adj)
    return paths
def find_removable_walls(paths_matrix):
    """List wall cells whose removal could yield a shorter path.

    A useful candidate satisfies two conditions visible in the BFS cost
    matrix: its accumulated cost is strictly between 1000 and 2000
    (exactly one wall was crossed to reach it), and it touches at least
    one cell reachable without crossing any wall (cost < 1000).
    """
    number_rows = len(paths_matrix)
    number_cols = len(paths_matrix[0])
    removable_walls = []
    for row in range(number_rows):
        for col in range(number_cols):
            if not (1000 < paths_matrix[row][col] < 2000):
                continue
            neighbours = find_adjacents_of_a_cell(
                row, col, number_cols, number_rows)
            if any(paths_matrix[adj_x][adj_y] < 1000
                   for adj_x, adj_y in neighbours):
                removable_walls.append((row, col))
    return removable_walls
def solution(maze):
    """Shortest top-left to bottom-right path length, allowing the
    removal of at most one wall.

    Runs one BFS on the maze as-is, then retries with each potentially
    useful wall (see find_removable_walls) knocked down in turn.

    BUG FIX: the original skipped the "restore the wall" step when it
    returned early inside the loop, leaving the caller's maze mutated;
    the wall is now always put back before any return.
    """
    paths_matrix = BFS(maze)
    number_rows = len(paths_matrix)
    number_cols = len(paths_matrix[0])
    # the length of the path is stored in the bottom-right corner
    shortest_path = paths_matrix[number_rows - 1][number_cols - 1]
    # No path can be shorter than the longest grid dimension; once that
    # bound is reached there is nothing left to search for.
    best_absolute_path = max(number_rows, number_cols)
    if shortest_path == best_absolute_path:
        return shortest_path
    # can we remove some walls to search for a shorter path?
    for wall_x, wall_y in find_removable_walls(paths_matrix):
        maze[wall_x][wall_y] = 0  # remove that wall in the original maze
        try:
            candidate = BFS(maze)[number_rows - 1][number_cols - 1]
        finally:
            maze[wall_x][wall_y] = 1  # always restore the caller's maze
        if candidate < shortest_path:
            shortest_path = candidate
            if shortest_path == best_absolute_path:
                return shortest_path
    return shortest_path
# the following part is not required for the Google challenge
# Test mazes: 0 = open cell, 1 = wall; entry is top-left, exit bottom-right
maze0 = [[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]  # 4x4, solvable without removals
maze1 = [[0, 1, 1], [1, 0, 0], [1, 1, 0]]  # 3x3, requires one wall removal
maze2 = [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1],
         [0, 0, 0, 0, 0, 0]]  # 6x6 serpentine corridor
maze3 = [[0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0]]  # 5x3 zig-zag
maze4 = [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, | |
<reponame>stijnstijn/j2lsnek
import datetime
import pathlib
import socket
import json
import time
from helpers.handler import port_handler
from helpers.jj2 import jj2server
from helpers.functions import all_mirrors, query, fetch_all, fetch_one
from helpers.exceptions import ServerUnknownException
class servernet_handler(port_handler):
"""
Sync data between list servers
"""
reload_mode = None
    def handle_data(self):
        """
        Handle incoming API calls

        Lots of checking to ensure that incoming data is kosher, then processing and passing it on to other mirrors.
        Flow: authorize the peer by port/IP, read and JSON-decode the request, validate the envelope
        (action/data/origin), run each data item through process_data(), optionally re-broadcast the accepted
        items to other mirrors, and finally trigger a reload if a handler asked for one.
        """
        self.client.settimeout(5)  # should really be enough
        loops = 0
        payload = None
        self.buffer = bytearray()
        # only allowed mirrors, plus localhost for 10059 since that's where admin interfaces live
        if self.port == 10059:
            if self.ip != "127.0.0.1":
                self.ls.log.warning("Outside IP %s tried connection to remote admin API" % self.ip)
                self.end()
                return
        elif self.port == 10056:
            # reject unknown mirrors, loopback, and our own address
            if self.ip not in all_mirrors() or self.ip == "127.0.0.1" or self.ip == self.ls.ip:
                self.ls.log.warning("Unauthorized ServerNet connection from %s:%s" % (self.ip, self.port))
                self.end()
                return
            # record that this mirror is alive
            query("UPDATE mirrors SET lifesign = ? WHERE address = ?", (int(time.time()), self.ip))
        # receive API call: keep appending 2kB chunks until the buffer parses
        # as JSON, the socket times out, or the 12-read cap is hit
        while True:
            try:
                self.buffer.extend(self.client.recv(2048))
                loops += 1
            except (socket.timeout, TimeoutError):
                # NOTE(review): self.key is presumably set by port_handler — verify
                self.ls.log.error("ServerNet connection from %s timed out while receiving data" % self.key)
                break
            try:
                payload = json.loads(self.buffer.decode("ascii", "ignore"))
                break
            except ValueError:  # older python3s don't support json.JSONDecodeError
                pass
            if loops > 12:  # even our patience knows its limits
                break
        # if API call not received or readable for whatever reason, give up
        if not payload:
            if len(self.buffer) > 0:
                self.ls.log.error("ServerNet update received from %s, but could not acquire valid payload (got %s)" % (
                    self.ip, self.buffer.decode("ascii", "ignore")))
            self.end()
            return
        # same for incomplete call
        if "action" not in payload or "data" not in payload or "origin" not in payload:
            self.ls.log.error("ServerNet update received from %s, but JSON was incomplete" % self.ip)
            self.end()
            return
        # this shouldn't happen, but just in case...
        if payload["origin"] == self.ls.address:
            self.end()
            return
        # payload data should be a list, though usually with 0 or 1 items;
        # items accepted by process_data() are collected for re-broadcast
        try:
            pass_on = []
            for item in payload["data"]:
                if self.process_data(payload["action"], item):
                    pass_on.append(item)
        except TypeError:
            self.ls.log.error("ServerNet update received from %s, but data was not iterable" % self.ip)
            self.end()
            return
        # ok, payload is valid, process it
        self.ls.log.info("Received ServerNet update from %s: %s" % (self.ip, payload["action"]))
        # switch on the engine, pass it on: only admin-port ("web"-originated)
        # updates are re-broadcast, and never the actions listed below or any
        # "get-" query
        no_broadcast = ["hello", "request", "delist", "request-log", "send-log", "request-log-from"]
        if self.port == 10059 and len(pass_on) > 0 and payload["action"] not in no_broadcast and \
                payload["action"][0:4] != "get-" and payload["origin"] == "web":
            self.ls.broadcast(action=payload["action"], data=pass_on, ignore=[self.ip])
        self.end()
        # was a reload command given? (reload_mode is set by process_data handlers)
        if self.reload_mode is not None:
            self.ls.reload(mode=self.reload_mode)
        return
def process_data(self, action, data):
"""
Process API calls
:param action: Action/API call
:param data: List of items to process for this action
:return: True if call was succesful and can be passed on, False on failure or error
"""
# server listings
if action == "server":
try:
server = jj2server(data["id"])
except KeyError:
self.ls.log.error("Received incomplete server data from ServerNet connection %s" % self.ip)
return False
try:
[server.set(key, data[key]) for key in data]
except IndexError:
self.ls.log.error(
"Received incomplete server data from ServerNet connection %s (unknown field in %s)" % (
self.ip, repr(data)))
server.forget()
return False
server.set("remote", 1)
# we can't do anything with partial data
if server.new and (server.get("ip") is None or server.get("port") is None):
# this means we got a server update before the server has been first 'registered'
# that could happen if an earlier all-server update is missed somehow
server.forget()
# ban list (and whitelist) entries
elif action == "add-banlist":
if "origin" not in data:
data["origin"] = self.ls.address
try:
if not fetch_one(
"SELECT * FROM banlist WHERE address = ? AND type = ? AND note = ? AND origin = ? AND reserved = ?",
(data["address"], data["type"], data["note"], data["origin"], data["reserved"])):
query("INSERT INTO banlist (address, type, note, origin, reserved) VALUES (?, ?, ?, ?, ?)",
(data["address"], data["type"], data["note"], data["origin"], data["reserved"]))
except KeyError:
self.ls.log.error("Received incomplete banlist entry from ServerNet connection %s" % self.ip)
return False
self.ls.log.info("Added banlist entry via ServerNet connection %s" % self.ip)
# removal of ban/whitelist entries
elif action == "delete-banlist":
if "origin" not in data:
data["origin"] = self.ls.address
try:
fetch_one("DELETE FROM banlist WHERE address = ? AND type = ? AND note = ? AND origin = ? AND reserved = ?",
(data["address"], data["type"], data["note"], data["origin"], data["reserved"]))
except KeyError:
self.ls.log.error("Received incomplete banlist deletion request from ServerNet connection %s" % self.ip)
return False
self.ls.log.info("Removed banlist entry via ServerNet connection %s" % self.ip)
# server delistings
elif action == "delist":
try:
server = jj2server(data["id"], create_if_unknown=False)
if server.get("remote") == 1 or server.new:
server.forget()
else:
self.ls.log.error("Mirror %s tried delisting server %s, but server is not remote!" % (self.ip, data["id"]))
except ServerUnknownException:
# trying to delist a server we have no memory of? no
self.ls.log.error(
"Mirror %s tried delisting server %s, but server is unknown" % (self.ip, data["id"]))
return False
except KeyError:
self.ls.log.error("Received incomplete server data from ServerNet connection %s" % self.ip)
return False
self.ls.log.info("Delisted server via ServerNet connection %s" % self.ip)
# add mirror
elif action == "add-mirror":
try:
if fetch_one("SELECT * FROM mirrors WHERE name = ? OR address = ?",
(data["name"], data["address"])):
self.ls.log.info("Mirror %s tried adding mirror %s, but name or address already known" % (
self.ip, data["address"]))
return True
except KeyError:
self.ls.log.error("Received incomplete mirror info from ServerNet connection %s" % self.ip)
return False
if data["name"] == "web":
self.ls.log.error("'web' is a reserved name for mirrors, %s tried using it" % self.ip)
return False
query("INSERT INTO mirrors (name, address) VALUES (?, ?)", (data["name"], data["address"]))
self.ls.broadcast(action="hello", data=[{"from": self.ls.address}], recipients=[data["address"]])
self.ls.log.info("Added mirror %s via ServerNet connection %s" % (data["address"], self.ip))
# delete mirror
elif action == "delete-mirror":
try:
if not fetch_one("SELECT * FROM mirrors WHERE name = ? AND address = ?",
(data["name"], data["address"])):
self.ls.log.info("Mirror %s tried removing mirror %s, but not known" % (self.ip, data["address"]))
return True
except KeyError:
self.ls.log.error("Received incomplete mirror deletion request from ServerNet connection %s" % self.ip)
return False
query("DELETE FROM mirrors WHERE name = ? AND address = ?", (data["name"], data["address"]))
self.ls.log.info("Deleted mirror %s via ServerNet connection %s" % (data["address"], self.ip))
# motd updates
elif action == "set-motd":
if "expires" not in data or data["expires"] == "":
t = datetime.datetime.utcfromtimestamp(time.time() + 86400 * 3)
data["expires"] = t.strftime("%d-%m-%Y %H:%M")
try:
timestamp = fetch_one("SELECT value FROM settings WHERE item = ?", ("motd-updated",))
if timestamp and int(timestamp["value"]) > int(data["motd-updated"]):
self.ls.log.info("Received MOTD update from %s, but own MOTD was more recent" % self.ip)
return False
except KeyError:
self.ls.log.error("Received incomplete MOTD from ServerNet connection %s" % self.ip)
return False
try:
expires = datetime.datetime.strptime(data["expires"], "%d-%m-%Y %H:%M")
expires = expires.timestamp()
except (ValueError, OSError):
expires = int(time.time()) + 86400 * 3
query("UPDATE settings SET value = ? WHERE item = ?", (data["motd"], "motd"))
query("UPDATE settings SET value = ? WHERE item = ?", (int(time.time()), "motd-updated"))
query("UPDATE settings SET value = ? WHERE item = ?", (int(expires), "motd-expires"))
self.ls.log.info("Updated MOTD via ServerNet connection %s" % self.ip)
# sync requests: send all data
elif action == "request" or action == "hello":
# in case of "hello", also send a request for data to the other server
if action == "hello":
self.ls.broadcast(action="request", data=[{"from": self.ls.address}], recipients=[self.ip])
self.cleanup() # removes stale servers, etc
# servers
if "fragment" not in data or "servers" in data["fragment"]:
servers = fetch_all("SELECT * FROM servers WHERE players > 0 AND origin = ?", (self.ls.address,))
self.ls.broadcast(action="server", data=[{key: server[key] for key in server.keys()} for server in servers],
recipients=[self.ip])
# banlist
if "fragment" not in data or "banlist" in data["fragment"]:
banlist = fetch_all("SELECT * FROM banlist")
self.ls.broadcast(action="add-banlist", data=[{key: ban[key] for key in ban.keys()} for ban in banlist],
recipients=[self.ip])
# mirrors
if "fragment" not in data or "mirrors" not in data["fragment"]:
mirrors = fetch_all("SELECT name, address FROM mirrors")
self.ls.broadcast(action="add-mirror",
data=[{key: mirror[key] for key in mirror.keys()} for mirror in mirrors],
recipients=[self.ip])
# motd
if "fragment" not in data or "motd" in data["fragment"]:
settings = fetch_all("SELECT * FROM settings WHERE item IN (?, ?)", ("motd", "motd-updated"))
self.ls.broadcast(action="set-motd", data=[{item["item"]: item["value"] for item in settings}],
recipients=[self.ip])
self.ls.log.info("Sent sync data to ServerNet connection | |
( 5pm PST, 6pm MST,
# 8pm EST ) since the "last" file before then will be an irregular amount
# of time prior: 00:00 UTC - 22:45 UTC = -1 hour 15 minutes
if thisDatetime.hour == 0 and thisDatetime.minute == 0:
lastDatetime = lastDatetime - timedelta(hours = 1)
lastDatetime = (thisDatetime - timedelta(minutes = 15))
lastDatestring = lastDatetime.strftime(strptimeFormat)
# First-run datetime, timedelta, and string juggling for generating
# last-most-recent URLS for download.
for table in ['gkg', 'mentions', 'events']:
priorURLs[table] = ''.join([self.gBase.toolData['URLbase'],
lastDatestring, '.',
self.gBase.toolData['extensions'][table]])
# Shouldn't apply for first run, since no last/next file is set yet, and
# shouldn't matter for the last run, since self.realtimeWindow running out
# will halt execution in loopEDA() anyway.
if self.lastRealDatetime != '' and self.nextRealDatetime != '':
if thisDatetime == self.lastRealDatetime:
print("\n----------------------------------------------------------\n")
print("Isn't %s the same as %s ? Too early! No new update yet!" %
(thisDatetime.strftime[strftimeFormat],
self.lastRealDatetime.strftime[strftimeFormat]))
return (False, 'tooEarly')
elif thisDatetime > self.nextRealDatetime:
print("\n----------------------------------------------------------\n")
print("%s is a little later than %s . Too late! We missed one!" %
(thisDatetime.strftime[strftimeFormat],
self.lastRealDatetime.strftime[strftimeFormat]))
return (False, 'tooLate')
print(" URLs acquired:\n")
print("current:")
pp(fileURLs)
print("prior:")
pp(priorURLs)
print("Beginning per-table operations...\n")
for table in tableList:
# B05a - every-table operations
# Note that order of execution for all tables will be project-typical.
# Tracking per-table loop times
timecheckT = time()
print("Trying downloading and cleaning for most recent", table,
"file...")
# making use of alternate-mode functionality for GDELTbase methods.
thisDL = self.gBase.downloadGDELTFile(fileURLs[table], table,
verbose = True, mode = 'realtime')
# Matching the same input formatting requirements, typically performed
# in the 'table' versions of GDELTbase methods
fileName = fileURLs[table].replace(self.gBase.toolData['URLbase'], '')
fileName = fileName.replace('.zip', '')
# cleaning the file (exported to realtimeClean as .json)
thisClean = self.gBase.cleanFile(fileName, verbose = True,
mode = 'realtime')
# tracking prior URLs, still, might delete this section
lastFileName = priorURLs[table].replace(self.gBase.toolData['URLbase'], '')
lastFileName = lastFileName.replace('.zip', '')
# GKG still has different extensions...
if table == 'gkg':
cleanFileName = fileName.replace('.csv', '.json')
cleanLastFileName = lastFileName.replace('.csv', '.json')
else:
cleanFileName = fileName.replace('.CSV', '.json')
cleanLastFileName = lastFileName.replace('.CSV', '.json')
# Each iterative run of this function will add another most-recent
# datafile, so long as it hasn't already been collected and cleaned, but
# the first run should wipe per-table collections before populating 'em
# with records.
if not self.realtimeStarted:
print(" Dropping any old realtime GDELT MongoDB collection...")
self.gBase.localDb['collections']['realtime'][table].drop()
print("Starting MongoDB export for acquired file...")
thisMongo = self.gBase.mongoFile(cleanFileName, table, verbose = True,
mode = 'realtime')
print('')
# Permitting delay of report generation for N iterations
if lastRun:
pass
# bails on this loop iteration if not final realtimeEDA() iteration
else:
continue
# If lastRun == True, EDA processing will be executed in this iteration
# for any records in the 'realtime' MongoDB collection for this table.
print("Beginning EDA processing...")
# switching to table-appropriate logPath directory...
os.chdir(self.logPath[table]['realtime'])
# B05b - Events/Mentions handling
# Per-table records querying, DataFrame shaping, and Pandas Profiling
# EDA ProfileReport() generation.
if table == 'events' or table == 'mentions':
timecheckG = time()
print("\n Loading", table, "realtimeEDA files held locally...",
end = '')
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names'][table]['reduced'])
print(" ")
print(" Setting dtypes...")
thisDF = thisDF.astype(
dtype = self.gBase.toolData['columnTypes'][table],
copy = False,
)
print(" Converting datetimes...")
if table == 'events':
datetimeField = 'DATEADDED'
# mentions has an extra datetime field, 'EventTimeDate', converted here
if table == 'mentions':
datetimeField = 'MentionTimeDate'
thisDF['EventTimeDate'] = pd.to_datetime(thisDF['EventTimeDate'],
format = datetimeFormat)
thisDF[datetimeField] = pd.to_datetime(thisDF[datetimeField],
format = datetimeFormat)
print("\n ", table, "DataFrame .info():\n")
print(thisDF.info(),'\n')
edaDateString = thisDF[datetimeField].min().strftime(strftimeFormat)
if table == 'events':
configName = "GDELTeventsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Events_realtime_EDA_", edaDateString,
".html"])
if table == 'mentions':
configName = "GDELTmentionsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_Mentions_realtime_EDA_", edaDateString,
".html"])
print(" File to output:", edaLogName)
profile = ProfileReport(thisDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
del profile
del thisDF
print('')
print('------------------------------------------------------------\n')
# B05c - GKG handling
if table == 'gkg':
print("\n Pulling any", table, "realtime EDA files...", end = '')
timecheckG = time()
thisDF = pd.DataFrame.from_records(list(
self.gBase.localDb['collections']['realtime'][table].find(
projection = {"_id" : 0},
allow_disk_use = True,
no_cursor_timeout = True,
),
), columns = self.gBase.toolData['names']['gkg']['reduced'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
# Reusing GDELTedaGKGhelpers.py functions, since they'll work
# in this context. See that file for code and documentation.
timecheckG = time()
print(" Applying initial dtypes...", end = '')
thisDF = GDELTedaGKGhelpers.applyDtypes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Converting datetimes...", end = '')
thisDF = GDELTedaGKGhelpers.convertDatetimes(thisDF)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
edaDateString = thisDF['V21DATE'].min().strftime(strftimeFormat)
timecheckG = time()
print(" Splitting and forming columns from V15Tone...")
thisDF = GDELTedaGKGhelpers.convertGKGV15Tone(thisDF)
print(" ( took %0.3f s )" % (float(time()) - float(timecheckG)))
# B05d - GKG non-variable-length EDA generation
# Isolating main columns for their own EDA, dropping variable-length
# columns for copy (not inplace).
timecheckG = time()
print(" Starting EDA generation for main GKG columns only...", end='')
mainDF = thisDF.drop(columns = ['V1Locations',
'V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
print(" ( drop/copy: %0.3f s )" % (float(time()) - float(timecheckG)))
print("\n GKG main columns DataFrame .info():\n")
print(mainDF.info())
print('')
# constructing EDA output filename
configName = "GDELTgkgMainEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_main_EDA_", edaDateString,
".html"])
# Generating non-variable-length-subfield column EDA
print("\n File to output:", edaLogName)
profile = ProfileReport(mainDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del profile
del mainDF
print(" Continuing processing with separate normalization of each",
"variable-length subfield...\n")
# B05e - V1Locations EDA generation
timecheckG = time()
print(" Exploding V1Locations...", end = '')
locationDF = thisDF.drop(columns = ['V1Counts',
'V1Themes',
'V1Persons',
'V1Organizations'])
locationDF = locationDF.explode('V1Locations')
print(" ( drop/explode: %0.3f s )" % \
(float(time()) - float(timecheckG)))
timecheckG = time()
print(" Normalizing V1Locations...", end = '')
subcols = pd.json_normalize(locationDF['V1Locations'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Renaming columns, dropping old, rejoining, astyping...",
end = '')
subcols.columns = [f"V1Locations_{c}" for c in subcols.columns]
locationDF = locationDF.drop(columns = ['V1Locations']).join(
subcols).astype({'V1Locations_FullName' : pd.StringDtype(),
'V1Locations_CountryCode' : pd.StringDtype(),
'V1Locations_ADM1Code' : pd.StringDtype(),
'V1Locations_FeatureID' : pd.StringDtype(),},
copy = False)
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
print("\n GKG Locations-normalized DataFrame .info():\n")
print(locationDF.info())
print(" Setting index to 'GKGRECORDID'...")
locationDF.set_index(keys='GKGRECORDID', drop = True, append = False,
inplace = True, verify_integrity = False)
configName = "GDELTgkgLocationsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_locations_EDA_",
edaDateString,
".html"])
timecheckG = time()
print("\n File to output:", edaLogName)
profile = ProfileReport(locationDF, config_file = configName)
print(" Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del locationDF
del profile
# B05f - V1Counts EDA generation
timecheckG = time()
print(" Exploding V1Counts...", end = '')
countsDF = thisDF.drop(columns = ['V1Locations',
'V1Themes',
'V1Persons',
'V1Organizations'])
print(" ( drop/explode: %0.3f s )" % \
(float(time()) - float(timecheckG)))
countsDF = countsDF.explode('V1Counts')
print(" Normalizing V1Counts...", end = '')
subcols = pd.json_normalize(countsDF['V1Counts'])
print(" ( %0.3f s )" % (float(time()) - float(timecheckG)))
timecheckG = time()
print(" Renaming columns, dropping old, rejoining,",
"astyping... ", end = '')
subcols.columns = [f"V1Counts_{c}" for c in subcols.columns]
countsDF = countsDF.drop(columns = ['V1Counts']).join(
subcols).astype({
'V1Counts_CountType' : pd.StringDtype(),
'V1Counts_ObjectType' : pd.StringDtype(),
'V1Counts_LocationFullName' : pd.StringDtype(),
'V1Counts_LocationCountryCode' : pd.StringDtype(),
'V1Counts_LocationADM1Code' : pd.StringDtype(),
'V1Counts_LocationFeatureID' : pd.StringDtype(),
}, copy = False)
print("\n GKG Counts-normalized DataFrame .info():\n")
print(countsDF.info())
print(" Setting index to 'GKGRECORDID'...")
countsDF.set_index(keys='GKGRECORDID', drop = True, append = False,
inplace = True, verify_integrity = False)
configName = "GDELTgkgCountsEDAconfig_realtime.yaml"
edaLogName = ''.join(["GDELT_GKG_realtime_counts_EDA_", edaDateString,
".html"])
timecheckG = time()
print("\n File to output:", edaLogName)
profile = ProfileReport(countsDF, config_file = configName)
print("\n Generating html from report...")
profile.to_file(edaLogName)
EDAFiles[table].append(edaLogName)
print("\n ( ProfileReport() + .to_file() : %0.3f s )" %
(float(time()) - float(timecheckG)))
del countsDF
del profile
# B05g - V1Themes EDA generation
timecheckG = time()
print(" Exploding V1Themes...", end = '')
themesDF = thisDF.drop(columns = ['V1Locations',
'V1Counts',
'V1Persons',
'V1Organizations'])
themesDF = themesDF.explode('V1Themes')
print(" ( drop/explode: %0.3f s )" %
| |
# django_comments_xtd/tests/models.py
from datetime import datetime
from django.db import models
from django.db.models import permalink
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase as DjangoTestCase
from django_comments_xtd.models import (XtdComment,
MaxThreadLevelExceededException)
class PublicManager(models.Manager):
    """Manager exposing only articles whose publish date is not in the future."""

    def published(self):
        # Anything with publish <= now counts as published.
        queryset = self.get_query_set()
        return queryset.filter(publish__lte=datetime.now())
class Article(models.Model):
    """Article, that accepts comments."""
    title = models.CharField('title', max_length=200)
    slug = models.SlugField('slug', unique_for_date='publish')
    body = models.TextField('body')
    allow_comments = models.BooleanField('allow comments', default=True)
    publish = models.DateTimeField('publish', default=datetime.now)
    objects = PublicManager()
    class Meta:
        db_table = 'demo_articles'
        ordering = ('-publish',)
    @permalink
    def get_absolute_url(self):
        """Return the (view name, args, kwargs) triple for this article.

        ``self.publish.month`` replaces the previous
        ``int(self.publish.strftime('%m').lower())`` — an int->str->int
        round-trip whose ``.lower()`` on digits was a no-op.
        """
        return ('articles-article-detail', None,
                {'year': self.publish.year,
                 'month': self.publish.month,
                 'day': self.publish.day,
                 'slug': self.slug})
class Diary(models.Model):
    """Diary, that accepts comments."""
    # Free-form entry text.
    body = models.TextField('body')
    # Toggles whether the comment form is offered for this entry.
    allow_comments = models.BooleanField('allow comments', default=True)
    # Defaults to creation time; PublicManager.published() filters on this.
    publish = models.DateTimeField('publish', default=datetime.now)
    objects = PublicManager()
    class Meta:
        db_table = 'demo_diary'
        ordering = ('-publish',)
class ArticleBaseTestCase(DjangoTestCase):
    """Common fixture: two published articles used by the threading tests."""

    def setUp(self):
        fixtures = [
            ("September", "september", "During September..."),
            ("October", "october", "What I did on October..."),
        ]
        self.article_1, self.article_2 = [
            Article.objects.create(title=title, slug=slug, body=body)
            for title, slug, body in fixtures
        ]
class XtdCommentManagerTestCase(ArticleBaseTestCase):
    """Checks XtdComment.objects.for_app_models counting across articles.

    Deprecated ``self.assert_`` calls replaced with ``assertEqual`` for
    clearer failure messages (``assert_`` is a legacy alias of assertTrue).
    """
    def test_for_app_models(self):
        # there is no comment posted yet to article_1 nor article_2
        count = XtdComment.objects.for_app_models("tests.article").count()
        self.assertEqual(count, 0)
        article_ct = ContentType.objects.get(app_label="tests", model="article")
        site = Site.objects.get(pk=1)
        # post one comment to article_1
        XtdComment.objects.create(content_type = article_ct,
                                  object_pk = self.article_1.id,
                                  content_object = self.article_1,
                                  site = site,
                                  comment ="just a testing comment",
                                  submit_date = datetime.now())
        count = XtdComment.objects.for_app_models("tests.article").count()
        self.assertEqual(count, 1)
        # post one comment to article_2
        XtdComment.objects.create(content_type = article_ct,
                                  object_pk = self.article_2.id,
                                  content_object = self.article_2,
                                  site = site,
                                  comment = "yet another comment",
                                  submit_date = datetime.now())
        count = XtdComment.objects.for_app_models("tests.article").count()
        self.assertEqual(count, 2)
        # post a second comment to article_2
        XtdComment.objects.create(content_type = article_ct,
                                  object_pk = self.article_2.id,
                                  content_object = self.article_2,
                                  site = site,
                                  comment = "and another one",
                                  submit_date = datetime.now())
        count = XtdComment.objects.for_app_models("tests.article").count()
        self.assertEqual(count, 3)
# In order to test the save method and _calculate_thread_data, simulate the
# following threads, in order of arrival:
#
# testcase cmt.id parent level-0 level-1 level-2
# step1 1 - c1 <- cmt1
# step1 2 - c2 <- cmt2
# step2 3 1 -- c3 <- cmt1 to cmt1
# step2 4 1 -- c4 <- cmt2 to cmt1
# step3 5 2 -- c5 <- cmt1 to cmt2
# step4 6 5 -- -- c6 <- cmt1 to cmt1 to cmt2
# step4 7 4 -- -- c7 <- cmt1 to cmt2 to cmt1
# step5 8 3 -- -- c8 <- cmt1 to cmt1 to cmt1
# step5 9 - c9 <- cmt9
def thread_test_step_1(article):
    """Post comments 1 and 2 at thread level 0 (no parent)."""
    content_type = ContentType.objects.get(app_label="tests", model="article")
    site = Site.objects.get(pk=1)
    # Comments 1 and 2, both top-level (parent_id defaults to 0).
    for text in ("comment 1 to article", "comment 2 to article"):
        XtdComment.objects.create(content_type=content_type,
                                  object_pk=article.id,
                                  content_object=article,
                                  site=site,
                                  comment=text,
                                  submit_date=datetime.now())
def thread_test_step_2(article):
    """Post comments 3 and 4, both direct replies to comment 1."""
    content_type = ContentType.objects.get(app_label="tests", model="article")
    site = Site.objects.get(pk=1)
    # Comments 3 and 4, each with parent_id 1.
    for text in ("comment 1 to comment 1", "comment 2 to comment 1"):
        XtdComment.objects.create(content_type=content_type,
                                  object_pk=article.id,
                                  content_object=article,
                                  site=site,
                                  comment=text,
                                  submit_date=datetime.now(),
                                  parent_id=1)
def thread_test_step_3(article):
    """Post comment 5 as a reply to comment 2 (parent_id=2)."""
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    site = Site.objects.get(pk=1)
    # post Comment 5 to parent_id 2
    # Fix: the body previously read "comment 1 to comment 1" — a copy-paste
    # slip from step 2; per the thread table this is the first reply to
    # comment 2.
    XtdComment.objects.create(content_type = article_ct,
                              object_pk = article.id,
                              content_object = article,
                              site = site,
                              comment ="comment 1 to comment 2",
                              submit_date = datetime.now(),
                              parent_id = 2)
def thread_test_step_4(article):
    """Post comments 6 (reply to 5) and 7 (reply to 4), both at level 2."""
    content_type = ContentType.objects.get(app_label="tests", model="article")
    site = Site.objects.get(pk=1)
    for text, parent in (("cmt 1 to cmt 1 to cmt 2", 5),
                         ("cmt 1 to cmt 2 to cmt 1", 4)):
        XtdComment.objects.create(content_type=content_type,
                                  object_pk=article.id,
                                  content_object=article,
                                  site=site,
                                  comment=text,
                                  submit_date=datetime.now(),
                                  parent_id=parent)
def thread_test_step_5(article):
    """Post comment 8 (reply to comment 3) and comment 9 (new level-0 thread)."""
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    site = Site.objects.get(pk=1)
    # post Comment 8 to parent_id 3
    XtdComment.objects.create(content_type = article_ct,
                              object_pk = article.id,
                              content_object = article,
                              site = site,
                              comment ="cmt 1 to cmt 1 to cmt 1",
                              submit_date = datetime.now(),
                              parent_id = 3)
    # post Comment 9 with parent_id 0 (new top-level thread)
    # Fix: the body previously read "cmt 1 to cmt 2 to cmt 1" — copy-pasted
    # from comment 7; per the thread table c9 is an independent comment.
    XtdComment.objects.create(content_type = article_ct,
                              object_pk = article.id,
                              content_object = article,
                              site = site,
                              comment ="comment 9",
                              submit_date = datetime.now())
class BaseThreadStep1TestCase(ArticleBaseTestCase):
    """Thread attributes after step 1: two independent level-0 comments.

    Deprecated ``self.assert_`` replaced with ``assertEqual`` so failures
    report the actual vs expected values.
    """
    def setUp(self):
        super(BaseThreadStep1TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        ( #    cmt.id  thread_id  parent_id  level  order
            self.c1, #  1           1          1       0      1
            self.c2  #  2           2          2       0      1
        ) = XtdComment.objects.all()

    def test_threaded_comments_step_1_level_0(self):
        # comment 1
        self.assertEqual(self.c1.parent_id, 1)
        self.assertEqual(self.c1.thread_id, 1)
        self.assertEqual(self.c1.level, 0)
        self.assertEqual(self.c1.order, 1)
        # comment 2
        self.assertEqual(self.c2.parent_id, 2)
        self.assertEqual(self.c2.thread_id, 2)
        self.assertEqual(self.c2.level, 0)
        self.assertEqual(self.c2.order, 1)
class ThreadStep2TestCase(ArticleBaseTestCase):
    """Thread attributes after steps 1-2 (two replies to comment 1).

    Deprecated ``self.assert_`` replaced with ``assertEqual`` via a small
    helper so failures report actual vs expected values.
    """
    def setUp(self):
        super(ThreadStep2TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        thread_test_step_2(self.article_1)
        ( #    cmt.id  thread_id  parent_id  level  order
            self.c1, #  1           1          1       0      1
            self.c3, #  3           1          1       1      2
            self.c4, #  4           1          1       1      3
            self.c2  #  2           2          2       0      1
        ) = XtdComment.objects.all()

    def _assert_comment(self, cmt, parent_id, thread_id, level, order):
        """Check the four computed threading attributes of one comment."""
        self.assertEqual(cmt.parent_id, parent_id)
        self.assertEqual(cmt.thread_id, thread_id)
        self.assertEqual(cmt.level, level)
        self.assertEqual(cmt.order, order)

    def test_threaded_comments_step_2_level_0(self):
        self._assert_comment(self.c1, parent_id=1, thread_id=1, level=0, order=1)
        self._assert_comment(self.c2, parent_id=2, thread_id=2, level=0, order=1)

    def test_threaded_comments_step_2_level_1(self):
        self._assert_comment(self.c3, parent_id=1, thread_id=1, level=1, order=2)
        self._assert_comment(self.c4, parent_id=1, thread_id=1, level=1, order=3)
class ThreadStep3TestCase(ArticleBaseTestCase):
    """Thread attributes after steps 1-3 (first reply to comment 2).

    Deprecated ``self.assert_`` replaced with ``assertEqual`` via a small
    helper so failures report actual vs expected values.
    """
    def setUp(self):
        super(ThreadStep3TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        thread_test_step_2(self.article_1)
        thread_test_step_3(self.article_1)
        ( #    cmt.id  thread_id  parent_id  level  order
            self.c1, #  1           1          1       0      1
            self.c3, #  3           1          1       1      2
            self.c4, #  4           1          1       1      3
            self.c2, #  2           2          2       0      1
            self.c5  #  5           2          2       1      2
        ) = XtdComment.objects.all()

    def _assert_comment(self, cmt, parent_id, thread_id, level, order):
        """Check the four computed threading attributes of one comment."""
        self.assertEqual(cmt.parent_id, parent_id)
        self.assertEqual(cmt.thread_id, thread_id)
        self.assertEqual(cmt.level, level)
        self.assertEqual(cmt.order, order)

    def test_threaded_comments_step_3_level_0(self):
        self._assert_comment(self.c1, parent_id=1, thread_id=1, level=0, order=1)
        self._assert_comment(self.c2, parent_id=2, thread_id=2, level=0, order=1)

    def test_threaded_comments_step_3_level_1(self):
        self._assert_comment(self.c3, parent_id=1, thread_id=1, level=1, order=2)
        self._assert_comment(self.c4, parent_id=1, thread_id=1, level=1, order=3)
        self._assert_comment(self.c5, parent_id=2, thread_id=2, level=1, order=2)
class ThreadStep4TestCase(ArticleBaseTestCase):
    """Thread attributes after steps 1-4 (level-2 replies in both threads).

    Deprecated ``self.assert_`` replaced with ``assertEqual`` via a small
    helper so failures report actual vs expected values.
    """
    def setUp(self):
        super(ThreadStep4TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        thread_test_step_2(self.article_1)
        thread_test_step_3(self.article_1)
        thread_test_step_4(self.article_1)
        ( #    cmt.id  thread_id  parent_id  level  order
            self.c1, #  1           1          1       0      1
            self.c3, #  3           1          1       1      2
            self.c4, #  4           1          1       1      3
            self.c7, #  7           1          4       2      4
            self.c2, #  2           2          2       0      1
            self.c5, #  5           2          2       1      2
            self.c6  #  6           2          5       2      3
        ) = XtdComment.objects.all()

    def _assert_comment(self, cmt, parent_id, thread_id, level, order):
        """Check the four computed threading attributes of one comment."""
        self.assertEqual(cmt.parent_id, parent_id)
        self.assertEqual(cmt.thread_id, thread_id)
        self.assertEqual(cmt.level, level)
        self.assertEqual(cmt.order, order)

    def test_threaded_comments_step_4_level_0(self):
        self._assert_comment(self.c1, parent_id=1, thread_id=1, level=0, order=1)
        self._assert_comment(self.c2, parent_id=2, thread_id=2, level=0, order=1)

    def test_threaded_comments_step_4_level_1(self):
        self._assert_comment(self.c3, parent_id=1, thread_id=1, level=1, order=2)
        self._assert_comment(self.c4, parent_id=1, thread_id=1, level=1, order=3)
        self._assert_comment(self.c5, parent_id=2, thread_id=2, level=1, order=2)

    def test_threaded_comments_step_4_level_2(self):
        self._assert_comment(self.c6, parent_id=5, thread_id=2, level=2, order=3)
        self._assert_comment(self.c7, parent_id=4, thread_id=1, level=2, order=4)
class ThreadStep5TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep5TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
( # cmt.id thread_id parent_id level order
self.c1, # 1 1 1 0 1
self.c3, # 3 1 1 | |
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Tender(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, type=None, name=None, employee_id=None, receipt_url=None, card_brand=None, pan_suffix=None, entry_method=None, payment_note=None, total_money=None, tendered_money=None, change_back_money=None, refunded_money=None, is_exchange=None):
"""
V1Tender - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'type': 'str',
'name': 'str',
'employee_id': 'str',
'receipt_url': 'str',
'card_brand': 'str',
'pan_suffix': 'str',
'entry_method': 'str',
'payment_note': 'str',
'total_money': 'V1Money',
'tendered_money': 'V1Money',
'change_back_money': 'V1Money',
'refunded_money': 'V1Money',
'is_exchange': 'bool'
}
self.attribute_map = {
'id': 'id',
'type': 'type',
'name': 'name',
'employee_id': 'employee_id',
'receipt_url': 'receipt_url',
'card_brand': 'card_brand',
'pan_suffix': 'pan_suffix',
'entry_method': 'entry_method',
'payment_note': 'payment_note',
'total_money': 'total_money',
'tendered_money': 'tendered_money',
'change_back_money': 'change_back_money',
'refunded_money': 'refunded_money',
'is_exchange': 'is_exchange'
}
self._id = id
self._type = type
self._name = name
self._employee_id = employee_id
self._receipt_url = receipt_url
self._card_brand = card_brand
self._pan_suffix = pan_suffix
self._entry_method = entry_method
self._payment_note = payment_note
self._total_money = total_money
self._tendered_money = tendered_money
self._change_back_money = change_back_money
self._refunded_money = refunded_money
self._is_exchange = is_exchange
    @property
    def id(self):
        """The tender's unique ID.

        :return: The id of this V1Tender.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Set the tender's unique ID.

        :param id: The id of this V1Tender.
        :type: str
        """
        self._id = id
    @property
    def type(self):
        """The type of tender.

        :return: The type of this V1Tender.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Set the type of tender.

        :param type: The type of this V1Tender.
        :type: str
        """
        self._type = type
    @property
    def name(self):
        """A human-readable description of the tender.

        :return: The name of this V1Tender.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Set the human-readable description of the tender.

        :param name: The name of this V1Tender.
        :type: str
        """
        self._name = name
    @property
    def employee_id(self):
        """The ID of the employee that processed the tender.

        :return: The employee_id of this V1Tender.
        :rtype: str
        """
        return self._employee_id
    @employee_id.setter
    def employee_id(self, employee_id):
        """Set the ID of the employee that processed the tender.

        :param employee_id: The employee_id of this V1Tender.
        :type: str
        """
        self._employee_id = employee_id
    @property
    def receipt_url(self):
        """The URL of the receipt for the tender.

        :return: The receipt_url of this V1Tender.
        :rtype: str
        """
        return self._receipt_url
    @receipt_url.setter
    def receipt_url(self, receipt_url):
        """Set the URL of the receipt for the tender.

        :param receipt_url: The receipt_url of this V1Tender.
        :type: str
        """
        self._receipt_url = receipt_url
    @property
    def card_brand(self):
        """The brand of credit card provided.

        :return: The card_brand of this V1Tender.
        :rtype: str
        """
        return self._card_brand
    @card_brand.setter
    def card_brand(self, card_brand):
        """Set the brand of credit card provided.

        :param card_brand: The card_brand of this V1Tender.
        :type: str
        """
        self._card_brand = card_brand
    @property
    def pan_suffix(self):
        """The last four digits of the provided credit card's account number.

        :return: The pan_suffix of this V1Tender.
        :rtype: str
        """
        return self._pan_suffix
    @pan_suffix.setter
    def pan_suffix(self, pan_suffix):
        """Set the last four digits of the provided credit card's account number.

        :param pan_suffix: The pan_suffix of this V1Tender.
        :type: str
        """
        self._pan_suffix = pan_suffix
    @property
    def entry_method(self):
        """The method used to provide the tender.

        NOTE(review): the generated docstring said "The tender's unique ID",
        a copy-paste of the `id` description; per the Square V1Tender API
        this field describes how the tender was entered (e.g. swiped/keyed)
        — confirm wording against the current spec.

        :return: The entry_method of this V1Tender.
        :rtype: str
        """
        return self._entry_method
    @entry_method.setter
    def entry_method(self, entry_method):
        """Set the method used to provide the tender (see getter NOTE).

        :param entry_method: The entry_method of this V1Tender.
        :type: str
        """
        self._entry_method = entry_method
    @property
    def payment_note(self):
        """Notes entered by the merchant about the tender at the time of payment, if any. Typically only present for tender with the type: OTHER.

        :return: The payment_note of this V1Tender.
        :rtype: str
        """
        return self._payment_note
    @payment_note.setter
    def payment_note(self, payment_note):
        """Set the merchant's payment note (typically only for type: OTHER).

        :param payment_note: The payment_note of this V1Tender.
        :type: str
        """
        self._payment_note = payment_note
    @property
    def total_money(self):
        """The total amount of money provided in this form of tender.

        :return: The total_money of this V1Tender.
        :rtype: V1Money
        """
        return self._total_money
    @total_money.setter
    def total_money(self, total_money):
        """Set the total amount of money provided in this form of tender.

        :param total_money: The total_money of this V1Tender.
        :type: V1Money
        """
        self._total_money = total_money
    @property
    def tendered_money(self):
        """The amount of total_money applied to the payment.

        :return: The tendered_money of this V1Tender.
        :rtype: V1Money
        """
        return self._tendered_money
    @tendered_money.setter
    def tendered_money(self, tendered_money):
        """Set the amount of total_money applied to the payment.

        :param tendered_money: The tendered_money of this V1Tender.
        :type: V1Money
        """
        self._tendered_money = tendered_money
    @property
    def change_back_money(self):
        """The amount of total_money returned to the buyer as change.

        :return: The change_back_money of this V1Tender.
        :rtype: V1Money
        """
        return self._change_back_money
    @change_back_money.setter
    def change_back_money(self, change_back_money):
        """Set the amount of total_money returned to the buyer as change.

        :param change_back_money: The change_back_money of this V1Tender.
        :type: V1Money
        """
        self._change_back_money = change_back_money
    @property
    def refunded_money(self):
        """The total of all refunds applied to this tender. This amount is always negative or zero.

        :return: The refunded_money of this V1Tender.
        :rtype: V1Money
        """
        return self._refunded_money
    @refunded_money.setter
    def refunded_money(self, refunded_money):
        """Set the total of all refunds applied to this tender (negative or zero).

        :param refunded_money: The refunded_money of this V1Tender.
        :type: V1Money
        """
        self._refunded_money = refunded_money
    @property
    def is_exchange(self):
        """Whether the tender is associated with an exchange. If true, the tender represents the value of goods returned in an exchange, not actual money paid; it reduces the tender amounts needed to pay for items purchased in the exchange.

        :return: The is_exchange of this V1Tender.
        :rtype: bool
        """
        return self._is_exchange
    @is_exchange.setter
    def is_exchange(self, is_exchange):
        """Set whether the tender is associated with an exchange (see getter).

        :param is_exchange: The is_exchange of this V1Tender.
        :type: bool
        """
        self._is_exchange = is_exchange
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, | |
"""
contents = [(0, 5)]
@schema
class JitterInPlace(dj.Computed):
    """Model activation of the MEI under every in-place (dy, dx) shift."""
    definition = """
    -> MEI
    -> ImageConfig
    -> JitterConfig
    ---
    jitter_activations: longblob # activation resulting from jitter
    """
    def make(self, key):
        readout_key = key['readout_key']
        neuron_id = key['neuron_id']
        print('Jitter analysis: Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
        # Fetch the MEI and normalize its contrast to the configured target.
        mei = (MEI() & key).fetch1('mei')
        target_mean, target_contrast, force_stats = (ImageConfig() & key).fetch1(
            'img_mean', 'img_contrast', 'force_stats')
        mei, clipped, actual_contrast = adjust_contrast(
            mei, target_contrast, mu=target_mean, force=force_stats)
        jitter_size = int((JitterConfig & key).fetch1('jitter_size'))
        # Input statistics and adjusted model for this unit.
        _, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
        adj_model = get_adj_model(get_multi_model(key), readout_key, neuron_id,
                                  mu_eye=mu_eye)
        # Every (index, offset) pair in [-jitter_size, jitter_size].
        offsets = list(enumerate(range(-jitter_size, jitter_size + 1)))
        activations = np.empty((len(offsets), len(offsets)))
        with torch.no_grad():
            img = torch.Tensor(process(mei[..., None], mu=bias,
                                       sigma=scale)[None, ...]).to('cuda')
            for (iy, dy), (ix, dx) in product(offsets, offsets):
                dy, dx = int(dy), int(dx)
                shifted = roll(roll(img, dy, -2), dx, -1)
                activations[iy, ix] = adj_model(shifted).data.cpu().numpy()[0]
        key['jitter_activations'] = activations
        self.insert1(key)
@schema
class StartingImage(dj.Manual):
    # Manually-inserted seed images, keyed by (image_class, image_id);
    # presumably used as starting points for image optimization — TODO confirm
    # against the callers.
    definition = """
    image_class: varchar(32) # class of image
    image_id: int # unique id under a class
    ---
    image: longblob # actual image
    tier: varchar(16) # tier (e.g. train, test, valid)
    normalized: tinyint # whether the image is pre-normalized
    description='': varchar(128) # additional description of the image
    """
@schema
class ImageStatistics(dj.Computed):
    """
    Image statistics in pixel values and in luminance based on interpolated values.
    """
    definition = """
    -> TargetDataset
    ---
    img_mean: float
    img_std: float
    lum_mean: float
    lum_std: float
    """
    def make(self, key):
        member_key = (StaticMultiDataset.Member & key).fetch1('KEY')
        # Interpolated pixel->luminance mapping for this scan's calibration.
        to_luminance, *_ = ClosestCalibration().get_interp(member_key)
        frames = np.stack((Frame * ConditionTier & member_key).fetch('frame'))
        luminance = to_luminance(frames)
        # Mean over all pixels; std as the average of per-frame stds.
        key['img_mean'] = frames.mean()
        key['img_std'] = frames.std(axis=(1, 2)).mean()
        key['lum_mean'] = luminance.mean()
        key['lum_std'] = luminance.std(axis=(1, 2)).mean()
        self.insert1(key)
@schema
class GammaImageStatistics(dj.Computed):
    """
    Image statistics in pixel values and in luminance based on fitted Gamma curves.
    """
    definition = """
    -> TargetDataset
    ---
    img_mean: float
    img_std: float
    lum_mean: float
    lum_std: float
    """
    def make(self, key):
        member_key = (StaticMultiDataset.Member & key).fetch1('KEY')
        # Gamma-fit pixel->luminance mapping (contrast with ImageStatistics,
        # which uses the interpolated calibration).
        to_luminance, *_ = ClosestCalibration().get_fs(member_key)
        frames = np.stack((Frame * ConditionTier & member_key).fetch('frame'))
        luminance = to_luminance(frames)
        # Mean over all pixels; std as the average of per-frame stds.
        key['img_mean'] = frames.mean()
        key['img_std'] = frames.std(axis=(1, 2)).mean()
        key['lum_mean'] = luminance.mean()
        key['lum_std'] = luminance.std(axis=(1, 2)).mean()
        self.insert1(key)
@schema
class ImageGradResponse(dj.Computed):
    """
    Gradients at the bunch of natural images
    """
    definition = """
    -> TargetModel
    -> TargetDataset.Unit
    ---
    img_rfs: external-data # gradient at the image
    img_activations: longblob # activation of the model at the image
    """
    @staticmethod
    def init_rf_image(stimulus_shape=(1, 36, 64)):
        """Return a zeroed CUDA image tensor that tracks gradients."""
        return torch.zeros(1, *stimulus_shape, device='cuda', requires_grad=True)
    def make(self, key):
        readout_key = key['readout_key']
        neuron_id = key['neuron_id']
        print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
        _, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
        member_key = (StaticMultiDataset.Member & key).fetch1('KEY')
        images, tiers = (Frame * ConditionTier & member_key).fetch('frame', 'tier')
        # Adjusted model for this unit.
        adj_model = get_adj_model(get_multi_model(key), readout_key, neuron_id,
                                  mu_eye=mu_eye)
        # Gradient w.r.t. the input and scalar activation, one per image.
        gradients, activations = [], []
        for frame in tqdm(images):
            frame = np.atleast_3d(frame)  # ensure channel dimension exists
            tensor = torch.tensor(process(frame, mu=bias, sigma=scale)[None, ...],
                                  dtype=torch.float32, requires_grad=True,
                                  device='cuda')
            response = adj_model(tensor)
            response.backward()
            gradients.append(tensor.grad.data.cpu().numpy().squeeze())
            activations.append(response.item())
        key['img_rfs'] = np.stack(gradients)
        key['img_activations'] = activations
        self.insert1(key)
@schema
class Top100Eigen(dj.Computed):
    """Largest 100 eigenvalues of the covariance of the per-image gradient RFs."""
    definition = """
    -> ImageGradResponse
    ---
    eigen_values: longblob # eigen values
    """
    def make(self, key):
        grads = (ImageGradResponse() & key).fetch1('img_rfs')
        # Flatten each per-image gradient RF into a vector: (n_images, n_pixels).
        flat = grads.reshape(grads.shape[0], -1)
        # Pixel-by-pixel covariance across images.
        cov = np.cov(flat.T)
        # BUGFIX: eigvals returns eigenvalues in no guaranteed order, so simply
        # taking [:100] picked 100 arbitrary eigenvalues. Sort descending first
        # so the stored values really are the top 100.
        vals = np.sort(np.real(eigvals(cov)))[::-1][:100]
        key['eigen_values'] = vals
        self.insert1(key)
@schema
class AllEigen(dj.Computed):
    """Full eigenvalue spectrum of the covariance of the per-image gradient RFs."""
    definition = """
    -> ImageGradResponse
    ---
    eigen_values: longblob # eigen values
    """
    def make(self, key):
        grads = (ImageGradResponse() & key).fetch1('img_rfs')
        # One flattened gradient vector per image: (n_images, n_pixels).
        flattened = grads.reshape(len(grads), -1)
        covariance = np.cov(flattened.T)
        key['eigen_values'] = np.real(eigvals(covariance))
        self.insert1(key)
@schema
class GradRF(dj.Computed):
    """Gradient receptive field plus an image synthesized from its linearization.

    The unit's gradient at a zero stimulus gives a point (linear) RF; deepdraw
    then performs gradient ascent on that linear surrogate to synthesize an
    image, whose activation and contrast-tuning summary statistics are stored.
    """
    definition = """
    -> TargetModel
    -> MEIParameter
    -> TargetDataset.Unit
    ---
    point_rf : longblob # single gradient RF
    rf : longblob # most exciting images
    activation : float # activation at the MEI
    monotonic : bool # does activity increase monotonically with contrast
    max_activation : float # activation at the maximum contrast
    max_contrast : float # contrast at which maximum activity is archived
    sat_contrast : float # contrast at which image would start saturating
    """
    key_source = TargetModel() * MEIParameter() * TargetDataset.Unit & NetworkConfig.CorePlusReadout
    @staticmethod
    def init_rf_image(stimulus_shape=(1, 36, 64)):
        # Zero stimulus with gradient tracking; the gradient at zero is the linear RF.
        return torch.zeros(1, *stimulus_shape, device='cuda', requires_grad=True)
    def make(self, key):
        readout_key = key['readout_key']
        neuron_id = key['neuron_id']
        print('Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
        # Normalization constants (bias/scale) and behavior/eye state for this scan.
        _, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
        # load the model
        models = get_multi_model(key)
        adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
        # --- Compute gradient receptive field
        X = self.init_rf_image(img_shape[1:])
        y = adj_model(X)
        y.backward()
        point_rf = X.grad.data.cpu().numpy().squeeze()
        rf = X.grad.data
        # Linear surrogate model: response is the dot product with the gradient RF.
        def linear_model(x):
            return (x * rf).sum()
        params = (MEIParameter() & key).fetch1()
        blur = bool(params['blur'])
        jitter = int(params['jitter'])
        precond = float(params['precond'])
        step_gain = float(params['step_gain'])
        norm = float(params['norm'])
        train_norm = float(params['train_norm'])
        # Single octave of gradient-ascent settings consumed by deepdraw.
        octaves = [
            {
                'iter_n': int(params['iter_n']),
                'start_sigma': float(params['start_sigma']),
                'end_sigma': float(params['end_sigma']),
                'start_step_size': float(params['start_step_size']),
                'end_step_size':float(params['end_step_size']),
            },
        ]
        # prepare initial image
        channels, original_h, original_w = img_shape[-3:]
        # the background color of the initial image
        background_color = np.float32([128] * channels)
        # generate initial random image
        gen_image = np.random.normal(background_color, 8, (original_h, original_w, channels))
        gen_image = np.clip(gen_image, 0, 255)
        # generate class visualization via octavewise gradient ascent
        gen_image = deepdraw(linear_model, gen_image, octaves, clip=True,
                             random_crop=False, blur=blur, jitter=jitter,
                             precond=precond, step_gain=step_gain,
                             bias=bias, scale=scale, norm=norm, train_norm=train_norm)
        # Evaluate the full model on the synthesized image (no gradients needed).
        with torch.no_grad():
            img = torch.Tensor(process(gen_image, mu=bias, sigma=scale)[None, ...]).to('cuda')
            activation = adj_model(img).data.cpu().numpy()[0]
        # NOTE: `rf` is rebound here from the gradient tensor to the synthesized image;
        # it is this image that gets stored in the `rf` attribute below.
        rf = gen_image.squeeze()
        # Activation as a function of image contrast, up to the saturation limit.
        cont, vals, lim_contrast = contrast_tuning(adj_model, rf, bias, scale)
        key['point_rf'] = point_rf
        key['monotonic'] = bool(np.all(np.diff(vals) >= 0))
        key['max_activation'] = np.max(vals)
        key['max_contrast'] = cont[np.argmax(vals)]
        key['sat_contrast'] = np.max(cont)
        key['rf'] = rf
        key['activation'] = activation
        self.insert1(key)
@schema
class RFJitterInPlace(dj.Computed):
    """Model activations when the gradient RF is circularly shifted in x and y."""
    definition = """
    -> GradRF
    -> ImageConfig
    -> JitterConfig
    ---
    rf_jitter_activations: longblob # activation resulting from jitter
    """
    def make(self, key):
        readout_key = key['readout_key']
        neuron_id = key['neuron_id']
        print('Jitter analysis: Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
        rf = (GradRF() & key).fetch1('rf')
        # Match the RF's mean luminance and contrast to the configured targets.
        mean_target, contrast_target, force = (ImageConfig() & key).fetch1(
            'img_mean', 'img_contrast', 'force_stats')
        rf, _, _ = adjust_contrast(rf, contrast_target, mu=mean_target, force=force)
        jitter_size = int((JitterConfig & key).fetch1('jitter_size'))
        # Dataset normalization constants and eye position for the model.
        _, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
        adj_model = get_adj_model(get_multi_model(key), readout_key, neuron_id, mu_eye=mu_eye)
        # One (matrix index, pixel offset) pair per jitter position along an axis.
        offsets = list(enumerate(range(-jitter_size, jitter_size + 1)))
        activations = np.empty((len(offsets), len(offsets)))
        with torch.no_grad():
            img = torch.Tensor(process(rf[..., None], mu=bias, sigma=scale)[None, ...]).to('cuda')
            for row, dy in offsets:
                for col, dx in offsets:
                    shifted = roll(roll(img, int(dy), -2), int(dx), -1)
                    activations[row, col] = adj_model(shifted).data.cpu().numpy()[0]
        key['rf_jitter_activations'] = activations
        self.insert1(key)
@schema
class ImageShifts(dj.Lookup):
    """Lookup of 2D pixel shifts: the 3x3 neighborhood of (x, y) offsets in {-1, 0, 1}."""
    definition = """
    x_shift: int # shift in the width dimension
    y_shift: int # shift in the hieght dimension
    """
    # BUGFIX: itertools.product returns a one-shot iterator; any second pass over
    # `contents` (e.g. re-instantiating the table) would see it empty. Materialize
    # it as a list so the contents are stable.
    contents = list(product([-1, 0, 1], [-1, 0, 1]))
@schema
class ShiftedRF(dj.Computed):
    """Model activation for the gradient RF shifted by one of the ImageShifts offsets."""
    definition = """
    -> GradRF
    -> ImageConfig
    -> ImageShifts
    ---
    shifted_rf_activation: float # activation resulting from shift
    shifted_rf: longblob # copy of the shifted RF
    """
    def make(self, key):
        readout_key = key['readout_key']
        neuron_id = key['neuron_id']
        print('Jitter analysis: Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
        rf = (GradRF() & key).fetch1('rf')
        # Match the RF's mean luminance and contrast to the configured targets.
        mean_target, contrast_target, force = (ImageConfig() & key).fetch1(
            'img_mean', 'img_contrast', 'force_stats')
        rf, _, _ = adjust_contrast(rf, contrast_target, mu=mean_target, force=force)
        # Circularly shift the image by the configured (x, y) offset.
        shifted = np.roll(np.roll(rf, key['x_shift'], 1), key['y_shift'], 0)
        # Dataset normalization constants and eye position for the model.
        _, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
        adj_model = get_adj_model(get_multi_model(key), readout_key, neuron_id, mu_eye=mu_eye)
        # Evaluate the model on the shifted image (no gradients needed).
        with torch.no_grad():
            img = torch.Tensor(process(shifted[..., None], mu=bias, sigma=scale)[None, ...]).to('cuda')
            activation = adj_model(img).data.cpu().numpy()[0]
        key['shifted_rf_activation'] = activation
        key['shifted_rf'] = shifted
        self.insert1(key)
@schema
class ShiftedMEI(dj.Computed):
definition = """
-> MEI
-> ImageConfig
-> ImageShifts
---
shifted_mei_activation: float # activation resulting from shift
shifted_mei: longblob # copy of the shifted RF
"""
def make(self, key):
readout_key = key['readout_key']
neuron_id = key['neuron_id']
print('Jitter analysis: Working on neuron_id={}, readout_key={}'.format(neuron_id, readout_key))
mei = (MEI() & key).fetch1('mei')
# adjust the contrast and mean luminance of the image
target_mean, target_contrast, force_stats = (ImageConfig() & key).fetch1('img_mean', 'img_contrast',
'force_stats')
mei, clipped, actual_contrast = adjust_contrast(mei, target_contrast, mu=target_mean, force=force_stats)
# shift the image
x_shift, y_shift = key['x_shift'], key['y_shift']
shifted_mei = np.roll(np.roll(mei, x_shift, 1), y_shift, 0)
# get input statistics
_, img_shape, bias, mu_beh, mu_eye, scale = prepare_data(key, readout_key)
# load the model
models = get_multi_model(key)
adj_model = get_adj_model(models, readout_key, neuron_id, mu_eye=mu_eye)
# compute the activation on the shifted image
| |
import os
import re
import time
import json
import pytest
from swsscommon import swsscommon
from dvslib.dvs_common import wait_for_result
class TestRouteBase(object):
    """Shared helpers for route tests: CONFIG/APPL/ASIC/STATE DB access and checks."""

    def setup_db(self, dvs):
        """Cache handles to the four databases of the virtual switch under test."""
        self.pdb = dvs.get_app_db()
        self.adb = dvs.get_asic_db()
        self.cdb = dvs.get_config_db()
        self.sdb = dvs.get_state_db()

    def set_admin_status(self, interface, status):
        self.cdb.update_entry("PORT", interface, {"admin_status": status})

    def create_vrf(self, vrf_name):
        """Create a VRF and return the OID of the virtual router it adds in ASIC DB."""
        initial_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER"))
        self.cdb.create_entry("VRF", vrf_name, {"empty": "empty"})
        self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_entries) + 1)
        current_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER"))
        assert len(current_entries - initial_entries) == 1
        return list(current_entries - initial_entries)[0]

    def remove_vrf(self, vrf_name):
        self.cdb.delete_entry("VRF", vrf_name)

    def create_l3_intf(self, interface, vrf_name):
        # An empty vrf_name binds the interface to the default VRF.
        if len(vrf_name) == 0:
            self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"})
        else:
            self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name})

    def remove_l3_intf(self, interface):
        self.cdb.delete_entry("INTERFACE", interface)

    def add_ip_address(self, interface, ip):
        self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"})

    def remove_ip_address(self, interface, ip):
        self.cdb.delete_entry("INTERFACE", interface + "|" + ip)

    def create_route_entry(self, key, pairs):
        tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE")
        fvs = swsscommon.FieldValuePairs(list(pairs.items()))
        tbl.set(key, fvs)

    def remove_route_entry(self, key):
        tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE")
        tbl._del(key)

    def check_route_entries(self, destinations):
        """Block until every destination prefix appears in the ASIC route table."""
        def _access_function():
            route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")
            route_destinations = [json.loads(route_entry)["dest"]
                                  for route_entry in route_entries]
            return (all(destination in route_destinations for destination in destinations), None)

        wait_for_result(_access_function)

    def check_route_state(self, prefix, value):
        """Assert that STATE DB holds `prefix` with the expected 'state' field value."""
        found = False
        route_entries = self.sdb.get_keys("ROUTE_TABLE")
        for key in route_entries:
            if key != prefix:
                continue
            found = True
            fvs = self.sdb.get_entry("ROUTE_TABLE", key)
            assert fvs != {}
            for f, v in fvs.items():
                if f == "state":
                    assert v == value
        assert found

    def get_asic_db_key(self, destination):
        """Return the raw ASIC DB route key for `destination`, or None if absent."""
        route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")
        for route_entry in route_entries:
            if json.loads(route_entry)["dest"] == destination:
                return route_entry
        return None

    def check_route_entries_with_vrf(self, destinations, vrf_oids):
        """Block until every (destination, vrf_oid) pair exists in the ASIC route table."""
        def _access_function():
            route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")
            # Parse each route key once instead of twice per entry.
            parsed = [json.loads(route_entry) for route_entry in route_entries]
            route_destination_vrf = [(entry["dest"], entry["vr"]) for entry in parsed]
            return (all((destination, vrf_oid) in route_destination_vrf
                        for destination, vrf_oid in zip(destinations, vrf_oids)), None)

        wait_for_result(_access_function)

    def check_route_entries_nexthop(self, destinations, vrf_oids, nexthops):
        """Block until each route points at the next-hop object with the expected IP."""
        def _access_function_nexthop():
            nexthop_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP")
            nexthop_oids = dict([(self.adb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", key)["SAI_NEXT_HOP_ATTR_IP"], key)
                                 for key in nexthop_entries])
            return (all(nexthop in nexthop_oids for nexthop in nexthops), nexthop_oids)

        _, nexthop_oids = wait_for_result(_access_function_nexthop)

        def _access_function_route_nexthop():
            route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")
            # Parse each route key once; map (dest, vr) -> next-hop OID attribute.
            route_destination_nexthop = {}
            for route_entry in route_entries:
                entry = json.loads(route_entry)
                attrs = self.adb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", route_entry)
                route_destination_nexthop[(entry["dest"], entry["vr"])] = attrs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID")
            return (all(route_destination_nexthop.get((destination, vrf_oid)) == nexthop_oids.get(nexthop)
                        for destination, vrf_oid, nexthop in zip(destinations, vrf_oids, nexthops)), None)

        wait_for_result(_access_function_route_nexthop)

    def check_deleted_route_entries(self, destinations):
        """Block until none of the destination prefixes remain in the ASIC route table."""
        def _access_function():
            route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY")
            route_destinations = [json.loads(route_entry)["dest"] for route_entry in route_entries]
            return (all(destination not in route_destinations for destination in destinations), None)

        wait_for_result(_access_function)

    def clear_srv_config(self, dvs):
        """Flush eth0 addresses on the first four attached server namespaces."""
        for server in dvs.servers[:4]:
            server.runcmd("ip address flush dev eth0")
class TestRoute(TestRouteBase):
""" Functionality tests for route """
    def test_RouteAddRemoveIpv4Route(self, dvs, testlog):
        """Add and remove an IPv4 route (via FRR) and a default route (via APPL DB).

        Verifies propagation through APPL DB and ASIC DB, and that STATE DB
        tracks the default route's state ("na" without a route, "ok" with one).
        """
        self.setup_db(dvs)
        self.clear_srv_config(dvs)
        # create l3 interface
        self.create_l3_intf("Ethernet0", "")
        self.create_l3_intf("Ethernet4", "")
        # check STATE route database, initial state shall be "na"
        self.check_route_state("0.0.0.0/0", "na")
        # set ip address
        self.add_ip_address("Ethernet0", "10.0.0.0/31")
        self.add_ip_address("Ethernet4", "10.0.0.2/31")
        # bring up interface
        self.set_admin_status("Ethernet0", "up")
        self.set_admin_status("Ethernet4", "up")
        # set ip address and default route on the attached server namespaces
        dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0")
        dvs.servers[0].runcmd("ip route add default via 10.0.0.0")
        dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0")
        dvs.servers[1].runcmd("ip route add default via 10.0.0.2")
        # get neighbor and arp entry (ping forces neighbor resolution)
        dvs.servers[0].runcmd("ping -c 1 10.0.0.3")
        # add route entry
        dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 2.2.2.0/24 10.0.0.1\"")
        # add default route entry
        fieldValues = {"nexthop": "10.0.0.1", "ifname": "Ethernet0"}
        self.create_route_entry("0.0.0.0/0", fieldValues)
        # check application database
        self.pdb.wait_for_entry("ROUTE_TABLE", "2.2.2.0/24")
        # check ASIC route database
        self.check_route_entries(["2.2.2.0/24"])
        # check STATE route database
        self.check_route_state("0.0.0.0/0", "ok")
        # remove route entry
        dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 2.2.2.0/24 10.0.0.1\"")
        # remove default route entry
        self.remove_route_entry("0.0.0.0/0")
        # check application database
        self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "2.2.2.0/24")
        # check ASIC route database
        self.check_deleted_route_entries(["2.2.2.0/24"])
        # remove ip address
        self.remove_ip_address("Ethernet0", "10.0.0.0/31")
        self.remove_ip_address("Ethernet4", "10.0.0.2/31")
        # remove l3 interface
        self.remove_l3_intf("Ethernet0")
        self.remove_l3_intf("Ethernet4")
        self.set_admin_status("Ethernet0", "down")
        self.set_admin_status("Ethernet4", "down")
        # check STATE route database, state set to "na" after deleting the default route
        self.check_route_state("0.0.0.0/0", "na")
        # remove ip address and default route
        dvs.servers[0].runcmd("ip route del default dev eth0")
        dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0")
        dvs.servers[1].runcmd("ip route del default dev eth0")
        dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0")
    def test_RouteAddRemoveIpv6Route(self, dvs, testlog):
        """Add and remove an IPv6 route (via FRR) and a default route (via APPL DB).

        Mirrors the IPv4 variant: verifies APPL DB / ASIC DB propagation and the
        STATE DB default-route state transitions.
        """
        self.setup_db(dvs)
        # create l3 interface
        self.create_l3_intf("Ethernet0", "")
        self.create_l3_intf("Ethernet4", "")
        # check STATE route database, initial state shall be "na"
        self.check_route_state("::/0", "na")
        # bring up interface
        self.set_admin_status("Ethernet0", "up")
        self.set_admin_status("Ethernet4", "up")
        # set ip address
        self.add_ip_address("Ethernet0", "fc00:e968:6179::de52:7100/64")
        self.add_ip_address("Ethernet4", "2001::1/64")
        dvs.runcmd("sysctl -w net.ipv6.conf.all.forwarding=1")
        # set ip address and default route
        # NOTE(review): server0 is assigned the same address as Ethernet0 above, and
        # that same address is later used as the route's next hop -- presumably the
        # server-side address was meant; this looks like an address-scrubbing
        # artifact. Verify against the upstream test.
        dvs.servers[0].runcmd("ip -6 address add fc00:e968:6179::de52:7100/64 dev eth0")
        dvs.servers[0].runcmd("ip -6 route add default via fc00:e968:6179::de52:7100")
        dvs.servers[1].runcmd("ip -6 address add 2001::2/64 dev eth0")
        dvs.servers[1].runcmd("ip -6 route add default via 2001::1")
        time.sleep(2)
        # get neighbor entry
        dvs.servers[0].runcmd("ping -6 -c 1 2001::2")
        # add route entry
        # NOTE(review): the prefix added here (fd00:a516:.../64) does not match the
        # prefix checked below (3000::/64), so the APPL DB wait cannot succeed as
        # written -- confirm the intended prefix.
        dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64 fc00:e968:6179::de52:7100\"")
        # add default route entry
        fieldValues = {"nexthop": "fc00:e968:6179::de52:7100", "ifname": "Ethernet0"}
        self.create_route_entry("::/0", fieldValues)
        # check application database
        self.pdb.wait_for_entry("ROUTE_TABLE", "3000::/64")
        # check ASIC route database
        self.check_route_entries(["3000::/64"])
        # check STATE route database
        self.check_route_state("::/0", "ok")
        # remove route entry
        dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64 fc00:e968:6179::de52:7100\"")
        # remove default route entry
        self.remove_route_entry("::/0")
        # check application database
        self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3000::/64")
        # check ASIC route database
        self.check_deleted_route_entries(["3000::/64"])
        # remove ip address
        self.remove_ip_address("Ethernet0", "fc00:e968:6179::de52:7100/64")
        self.remove_ip_address("Ethernet4", "2001::1/64")
        # remove l3 interface
        self.remove_l3_intf("Ethernet0")
        self.remove_l3_intf("Ethernet4")
        self.set_admin_status("Ethernet0", "down")
        self.set_admin_status("Ethernet4", "down")
        # check STATE route database, state set to "na" after deleting the default route
        self.check_route_state("::/0", "na")
        # remove ip address and default route
        dvs.servers[0].runcmd("ip -6 route del default dev eth0")
        dvs.servers[0].runcmd("ip -6 address del fc00:e968:6179::de52:7100/64 dev eth0")
        dvs.servers[1].runcmd("ip -6 route del default dev eth0")
        dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0")
def test_RouteAddRemoveIpv4RouteResolveNeigh(self, dvs, testlog):
self.setup_db(dvs)
self.clear_srv_config(dvs)
# create l3 interface
self.create_l3_intf("Ethernet0", "")
self.create_l3_intf("Ethernet4", "")
# set ip address
self.add_ip_address("Ethernet0", "10.0.0.0/31")
self.add_ip_address("Ethernet4", "10.0.0.2/31")
# bring up interface
self.set_admin_status("Ethernet0", "up")
self.set_admin_status("Ethernet4", "up")
# set ip address and default route
dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0")
dvs.servers[0].runcmd("ip route add default via 10.0.0.0")
dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0")
dvs.servers[1].runcmd("ip route add default via 10.0.0.2")
time.sleep(2)
# add route entry -- single nexthop
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 192.168.3.11/24 10.0.0.1\"")
# add route entry -- multiple nexthop
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 3.3.3.0/24 10.0.0.1\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 3.3.3.0/24 10.0.0.3\"")
# check application database
self.pdb.wait_for_entry("ROUTE_TABLE", "2.2.2.0/24")
self.pdb.wait_for_entry("ROUTE_TABLE", "3.3.3.0/24")
# check neighbor got resolved and removed from NEIGH_RESOLVE_TABLE
self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:10.0.0.1")
self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:10.0.0.3")
# check ASIC route database
self.check_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
# remove route entry
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 2.2.2.0/24 10.0.0.1\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 3.3.3.0/24 10.0.0.1\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 3.3.3.0/24 10.0.0.3\"")
# check application database
self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "2.2.2.0/24")
self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3.3.3.0/24")
# check ASIC route database
self.check_deleted_route_entries(["2.2.2.0/24", "3.3.3.0/24"])
# remove ip address
self.remove_ip_address("Ethernet0", "10.0.0.0/31")
self.remove_ip_address("Ethernet4", "10.0.0.2/31")
# remove l3 interface
self.remove_l3_intf("Ethernet0")
self.remove_l3_intf("Ethernet4")
self.set_admin_status("Ethernet0", "down")
self.set_admin_status("Ethernet4", "down")
# remove ip address and default route
dvs.servers[0].runcmd("ip route del default dev eth0")
dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0")
dvs.servers[1].runcmd("ip route del default dev eth0")
dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0")
def test_RouteAddRemoveIpv6RouteResolveNeigh(self, dvs, testlog):
self.setup_db(dvs)
# create l3 interface
self.create_l3_intf("Ethernet0", "")
self.create_l3_intf("Ethernet4", "")
# bring up interface
self.set_admin_status("Ethernet0", "up")
self.set_admin_status("Ethernet4", "up")
# set ip address
self.add_ip_address("Ethernet0", "fc00:e968:6179::de52:7100/64")
self.add_ip_address("Ethernet4", "2001::1/64")
dvs.runcmd("sysctl -w net.ipv6.conf.all.forwarding=1")
# set ip address and default route
dvs.servers[0].runcmd("ip -6 address add fc00:e968:6179::de52:7100/64 dev eth0")
dvs.servers[0].runcmd("ip -6 route add default via fc00:e968:6179::de52:7100")
dvs.servers[1].runcmd("ip -6 address add 2001::2/64 dev eth0")
dvs.servers[1].runcmd("ip -6 route add default via 2001::1")
time.sleep(2)
# add route entry -- single nexthop
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64 fc00:e968:6179::de52:7100\"")
# add route entry -- multiple nexthop
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route fc00:db20:35b:7399::5/64 fc00:e968:6179::de52:7100\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route fc00:db20:35b:7399::5/64 2001::2\"")
# check application database
self.pdb.wait_for_entry("ROUTE_TABLE", "3000::/64")
self.pdb.wait_for_entry("ROUTE_TABLE", "4000::/64")
# check neighbor got resolved and removed from NEIGH_RESOLVE_TABLE
self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet0:fc00:e968:6179::de52:7100")
self.pdb.wait_for_deleted_entry("NEIGH_RESOLVE_TABLE", "Ethernet4:2001::2")
# check ASIC route database
self.check_route_entries(["3000::/64", "4000::/64"])
# remove route entry
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64 fc00:e968:6179::de52:7100\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route fc00:db20:35b:7399::5/64 fc00:e968:6179::de52:7100\"")
dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route fc00:db20:35b:7399::5/64 2001::2\"")
# check application database
self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "3000::/64")
self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "4000::/64")
# check ASIC route database
self.check_deleted_route_entries(["3000::/64", "4000::/64"])
# remove ip address
self.remove_ip_address("Ethernet0", "fc00:e968:6179::de52:7100/64")
self.remove_ip_address("Ethernet4", "2001::1/64")
# remove l3 interface
self.remove_l3_intf("Ethernet0")
self.remove_l3_intf("Ethernet4")
self.set_admin_status("Ethernet0", "down")
self.set_admin_status("Ethernet4", "down")
# remove ip address and default route
dvs.servers[0].runcmd("ip -6 route del default dev eth0")
dvs.servers[0].runcmd("ip -6 address del | |
#!/usr/bin/env python3
"""
Node: Fab tree management.
The Node package provides a tree of FabNode's that roughly corresponds to a FreeCAD tree
as shown in the FreeCAD model view.
There are two public classes defined:
* FabBox:
This is a generic bounding box class similar to the FreeCAD BoundBox class
is used to enclose the FabNode contents and its children FabNode's.
This class has many more properties and is immutable (unlike the FreeCAD BoundBox class.)
* FabNode:
This is a sub-class of FabBox that has a name, a parent FabNode and other data structures
required to maintain the tree.
There are three private classes defined -- Fab_Prefix, Fab_Steps, and Fab_ProduceState.
Other Fab packages (e.g. Project and Solid) further sub-class FabNode to provide finer
grained distinctions between FabNode's.
The FabNode class enforces the following constraints:
* Each FabNode name must be compatible with a Python variable name
(i.e. upper/lower letters, digits, and underscores with a non-digit first letter.)
* All of the children FabNode's must have distinct names.
* A FabNode may occur only once in the Tree (i.e. DAG = Direct Acyclic Graph.)
Two notable attributes of the FabNode are:
* *Up* (FabNode):
The FabNode's parent.
Up is frequently used in code to access other FabNode's higher in the FabNode tree.
* *Project* (FabNode):
The FabNode tree root and is always of type FabProject which is defined in Project package.
Because the Python language disallows circular `import` statements, this is returned
as type FabNode rather than type FabProject.
See the FabNode documentation for further attributes.
(Briefly talk about produce() method here.)
"""
# <--------------------------------------- 100 characters ---------------------------------------> #
from collections import OrderedDict
from dataclasses import dataclass, field
import hashlib
from pathlib import Path as PathFile
from typing import Any, Dict, IO, List, Sequence, Set, Tuple, Union
from typeguard import check_type
from cadquery import Vector # type: ignore
from FabUtilities import FabToolController
from FabShops import FabShop
# FabBox:
@dataclass
class FabBox(object):
"""FabBox: X/Y/Z Axis Aligned Cuboid.
    A FabBox represents a cuboid (i.e. a rectangular parallelepiped, or right prism) where
the edges are aligned with the X, Y, and Z axes. This is basically equivalent to the FreeCAD
BoundBox object, but with way more attributes to access various points on the cuboid surface.
The basic attribute nomenclature is based on the compass points North (+Y), South (-Y),
East (+X) and West (-X). Two additional "compass" points called Top (+Z) and Bottom (-Z)
are introduced as well.
Thus:
* TNE represents the Top North East corner of the box.
* NE represents the center of the North East box edge.
* T represents the center of the top face of the box.
Attributes:
* Minimums/Maximums:
* XMax (float): The maximum X (East).
* XMin (float): The minimum X (West).
* YMax (float): The maximum Y (North).
* YMin (float): The minimum Y (South).
* ZMax (float): The maximum Z (Top).
* ZMin (float): The minimum Z (Bottom).
* The 6 face attributes:
* B (Vector): Center of bottom face.
* E (Vector): Center of east face.
* N (Vector): Center of north face.
* S (Vector): Center of south face.
* T (Vector): Center of top face.
* W (Vector): Center of west face.
* The 8 corner attributes:
* BNE (Vector): Bottom North East corner.
* BNW (Vector): Bottom North West corner.
* BSE (Vector): Bottom South East corner.
* BSW (Vector): Bottom South West corner.
* TNE (Vector): Top North East corner.
* TNW (Vector): Top North West corner.
* TSE (Vector): Top South East corner.
      * TSW (Vector): Top South West corner.
* The 12 edge attributes:
* BE (Vector): Center of Bottom East edge.
* BN (Vector): Center of Bottom North edge.
* BS (Vector): Center of Bottom South edge.
* BW (Vector): Center of Bottom West edge.
* NE (Vector): Center of North East edge
* NW (Vector): Center of North West edge
* SE (Vector): Center of South East edge
* SW (Vector): Center of South West edge
* TE (Vector): Center of Top East edge.
* TN (Vector): Center of Top North edge.
* TS (Vector): Center of Top South edge.
* TW (Vector): Center of Top West edge.
* The other attributes:
* C (Vector): Center point.
* DB (Vector): Bottom direction (i.e. B - C)
* DE (Vector): East direction (i.e. E - C)
* DN (Vector): North direction (i.e. N - C)
* DS (Vector): South direction (i.e. S - C)
* DT (Vector): Top direction (i.e. T - C)
* DW (Vector): West direction (i.e. W - C)
* DX (float): X box length (i.e. (E - W).Length)
* DY (float): Y box length (i.e. (N - S).Length)
* DZ (float): Z box length (i.e. (T - B).Length)
"""
# These are in the same order as FreeCAD BoundBox:
_XMin: float = field(init=False, repr=False)
_YMin: float = field(init=False, repr=False)
_ZMin: float = field(init=False, repr=False)
_XMax: float = field(init=False, repr=False)
_YMax: float = field(init=False, repr=False)
_ZMax: float = field(init=False, repr=False)
# FabBox.__init__():
def __post_init__(self) -> None:
self._XMin = -1.0
self._XMax = 1.0
self._YMin = -1.0
self._YMax = 1.0
self._ZMin = -1.0
self._ZMax = 1.0
# FabBox.enclose():
def enclose(self, bounds: Sequence[Union[Vector, "FabBox"]]) -> None:
"""Initialize a FabBox.
Arguments:
* *bounds* (Sequence[Union[Vector, FabBox]]):
A sequence of points or boxes to enclose.
Raises:
* ValueError: For bad or empty corners.
"""
if not isinstance(bounds, (list, tuple)):
raise RuntimeError(
f"{bounds} is {str(type(bounds))}, not List/Tuple") # pragma: no unit cover
if not bounds:
raise RuntimeError("Bounds sequence is empty")
# Convert *corners* into *vectors*:
bound: Union[Vector, FabBox]
vectors: List[Vector] = []
for bound in bounds:
if isinstance(bound, Vector):
vectors.append(bound)
elif isinstance(bound, FabBox):
vectors.append(bound.TNE)
vectors.append(bound.BSW)
else: # pragma: no unit coverage
raise RuntimeError(
f"{bound} is {str(type(bound))}, not Vector/FabBox")
# Initialize with from the first vector:
vector0: Vector = vectors[0]
x_min: float = vector0.x
y_min: float = vector0.y
z_min: float = vector0.z
x_max: float = x_min
y_max: float = y_min
z_max: float = z_min
# Sweep through *vectors* expanding the box limits:
vector: Vector
for vector in vectors[1:]:
x: float = vector.x
y: float = vector.y
z: float = vector.z
x_max = max(x_max, x)
x_min = min(x_min, x)
y_max = max(y_max, y)
y_min = min(y_min, y)
z_max = max(z_max, z)
z_min = min(z_min, z)
self._XMin = x_min
self._YMin = y_min
self._ZMin = z_min
self._XMax = x_max
self._YMax = y_max
self._ZMax = z_max
    # 6 Standard X/Y/Z min/max attributes:
    # FabBox.XMin():
    @property
    def XMin(self) -> float:
        """Minimum X coordinate (the West face plane)."""
        return self._XMin
    # FabBox.YMin():
    @property
    def YMin(self) -> float:
        """Minimum Y coordinate (the South face plane)."""
        return self._YMin
    # FabBox.ZMin():
    @property
    def ZMin(self) -> float:
        """Minimum Z coordinate (the Bottom face plane)."""
        return self._ZMin
    # FabBox.XMax()
    @property
    def XMax(self) -> float:
        """Maximum X coordinate (the East face plane)."""
        return self._XMax
    # FabBox.YMax()
    @property
    def YMax(self) -> float:
        """Maximum Y coordinate (the North face plane)."""
        return self._YMax
    # FabBox.ZMax()
    @property
    def ZMax(self) -> float:
        """Maximum Z coordinate (the Top face plane)."""
        return self._ZMax
    # 6 Face attributes:
    @property
    def B(self) -> Vector:
        """Bottom face center."""
        return Vector((self._XMin + self._XMax) / 2.0, (self._YMin + self._YMax) / 2.0, self._ZMin)
    @property
    def E(self) -> Vector:
        """East face center."""
        return Vector(self._XMax, (self._YMin + self._YMax) / 2.0, (self._ZMin + self._ZMax) / 2.0)
    @property
    def N(self) -> Vector:
        """North face center."""
        return Vector((self._XMin + self._XMax) / 2.0, self._YMax, (self._ZMin + self._ZMax) / 2.0)
    @property
    def S(self) -> Vector:
        """South face center."""
        return Vector((self._XMin + self._XMax) / 2.0, self._YMin, (self._ZMin + self._ZMax) / 2.0)
    @property
    def T(self) -> Vector:
        """Top face center."""
        return Vector((self._XMin + self._XMax) / 2.0, (self._YMin + self._YMax) / 2.0, self._ZMax)
    @property
    def W(self) -> Vector:
        """West face center."""
        return Vector(self._XMin, (self._YMin + self._YMax) / 2.0, (self._ZMin + self._ZMax) / 2.0)
    # 8 Corner attributes:
    @property
    def BNE(self) -> Vector:
        """Bottom North East corner (XMax, YMax, ZMin)."""
        return Vector(self._XMax, self._YMax, self._ZMin)
    @property
    def BNW(self) -> Vector:
        """Bottom North West corner (XMin, YMax, ZMin)."""
        return Vector(self._XMin, self._YMax, self._ZMin)
    @property
    def BSE(self) -> Vector:
        """Bottom South East corner (XMax, YMin, ZMin)."""
        return Vector(self._XMax, self._YMin, self._ZMin)
    @property
    def BSW(self) -> Vector:
        """Bottom South West corner (XMin, YMin, ZMin)."""
        return Vector(self._XMin, self._YMin, self._ZMin)
    @property
    def TNE(self) -> Vector:
        """Top North East corner (XMax, YMax, ZMax)."""
        return Vector(self._XMax, self._YMax, self._ZMax)
@property
def TNW(self) -> | |
= 95
elif newPreNum <= 9:
newWidth = 115
elif newPreNum <= 12:
newWidth = 130
else:
newWidth = 155
cmds.channelBox(self.channelBoxID, edit=True, pre=newPreNum, fieldWidth=newWidth)
cmds.deleteUI('setPrecisionNumber')
def hyperbolicSet(self, *args):
hyperbolicCheck = cmds.menuItem('hyperCheckBox', query=True, checkBox=True)
if hyperbolicCheck == True:
cmds.channelBox(self.channelBoxID, e=True, hyp=True)
if hyperbolicCheck == False:
cmds.channelBox(self.channelBoxID, e=True, hyp=False)
    def speedSlowSet(self, *args):
        # Channel slider drag speed: slow (0.1x).
        cmds.channelBox(self.channelBoxID, e=True, spd=0.1)
    def speedNormalSet(self, *args):
        # Channel slider drag speed: normal (1x).
        cmds.channelBox(self.channelBoxID, e=True, spd=1)
    def speedFastSet(self, *args):
        # Channel slider drag speed: fast (10x).
        cmds.channelBox(self.channelBoxID, e=True, spd=10)
    def niceNameSet(self, *args):
        # Show attributes with "nice" (title-case) names.
        cmds.channelBox(self.channelBoxID, e=True, nn=True, ln=False)
    def longNameSet(self, *args):
        # Show attributes with long names (e.g. translateX).
        cmds.channelBox(self.channelBoxID, e=True, nn=False, ln=True)
    def shortNameSet(self, *args):
        # Show attributes with short names (e.g. tx).
        cmds.channelBox(self.channelBoxID, e=True, nn=False, ln=False)
def channelBoxCommand(self, operation, *args):
channelSel = cmds.channelBox(self.channelBoxID, query=True, sma=True)
objSel = cmds.ls(sl=True)
# reset default channels
transformChannels = ["translateX", "translateY", "translateZ", "rotateX", "rotateY", "rotateZ"]
scaleChannels = ["scaleX", "scaleY", "scaleZ", "visibility"]
if (operation == "-channelEditor"):
mel.eval("lockingKeyableWnd;")
elif (operation == "-setAllToZero"):
for obj in objSel:
for channel in transformChannels:
cmds.setAttr(obj + "." + channel, 0)
for channel in scaleChannels:
cmds.setAttr(obj + "." + channel, 1)
# reset created channels
for obj in objSel:
createdChannels = []
allChannels = cmds.listAnimatable(obj)
for channel in allChannels:
attrName = channel.split(".")[-1]
createdChannels.append(attrName)
channels = list(set(createdChannels) - set(transformChannels) - set(scaleChannels))
for channel in channels:
defaultValue = cmds.addItem(obj + "." + channel, query=True, dv=True)
cmds.setAttr(obj + "." + channel, defaultValue)
elif (operation == "-keySelected"):
for obj in objSel:
for channel in channelSel:
cmds.setKeyframe(obj + "." + channel)
elif (operation == "-keyAll"):
for obj in objSel:
allChannels = cmds.listAnimatable(obj)
cmds.select(obj)
for channel in allChannels:
cmds.setKeyframe(channel)
elif (operation == "-breakDownSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setKeyframe(obj + "." + channel, breakdown=True)
elif (operation == "-breakDownAll"):
for obj in objSel:
allChannels = cmds.listAnimatable(obj)
cmds.select(obj)
for channel in allChannels:
cmds.setKeyframe(channel, breakdown=True)
elif (operation == "-cutSelected") or (operation == "-deleteSelected"):
for obj in objSel:
for channel in channelSel:
cmds.cutKey(obj, at=channel)
elif (operation == "-copySelected"):
for obj in objSel:
for channel in channelSel:
cmds.copyKey(obj, at=channel)
elif (operation == "-pasteSelected"):
for obj in objSel:
for channel in channelSel:
cmds.pasteKey(obj, connect=True, at=channel)
elif (operation == "-breakConnection"):
for obj in objSel:
for channel in channelSel:
attr = obj + "." + channel
mel.eval("source channelBoxCommand; CBdeleteConnection \"%s\"" % attr)
elif (operation == "-lockSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=True)
elif (operation == "-unlockSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=False)
elif (operation == "-hideSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, keyable=False, channelBox=False)
elif (operation == "-lockAndHideSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=True)
cmds.setAttr(obj + "." + channel, keyable=False, channelBox=False)
elif (operation == "-unhideHided"):
# channelBoxChannels = transformChannels + scaleChannels
for obj in objSel:
# for channel in channelBoxChannels:
# cmds.setAttr( obj + "." + channel, l=False, k=True )
# get locked channel
lockChannels = cmds.listAttr(obj, locked=True)
if lockChannels == None:
message = "nothing is locked"
self.warningPopup(message)
break
else:
for channel in lockChannels:
cmds.setAttr(obj + "." + channel, keyable=True, channelBox=True)
elif (operation == "-showDefault"):
for obj in objSel:
defaultChannel = ["tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"]
for channel in defaultChannel:
cmds.setAttr(obj + "." + channel, k=True, cb=True)
elif (operation == "-expression"):
mel.eval('expressionEditor EE "" "";')
elif (operation == "-unhideHided"):
mel.eval('SetDrivenKeyOptions;')
elif (operation == "-deleteAttribute"):
for obj in objSel:
for channel in channelSel:
cmds.deleteAttr(obj, at=channel)
elif (operation == "-about"):
cmds.confirmDialog(t="About DAMG Controller Maker",
m=("Thank you for using my script :D\n"
"Made by <NAME> - JimJim\n"
"Please feel free to give your feedback\n"
"Email me: <EMAIL>\n"),
b="Close")
# ----------------------------------------------------------------------------------------------------------- #
""" MAIN CLASS: DAMG TOOL BOX II - ALL ABOUT CONTROLLER UI """
# ----------------------------------------------------------------------------------------------------------- #
"""
A DRAFT PREVIS FOR UI
WARNING: Change preVis here before changing code, or at least update it after changed the UI.
It helps me easier in calculating or considering all the measurement, variables,
as well as how I handle innovating UI quickly and accurately. Actually, it's saving my time.
(w) 4 3 4 1
| | | |
W |---------------------||-----------------||----------------------------||---------------------|
H Y
X 1 2 | 3 4 || 5 6 7 || 8 9 10 11 || 12
(h) - ---------------------------------------------------------------------------------------------
1----| 1 || USER ASSETS CONTROLLER MANAGER CHANNEL BOX ||
- | --------------------- ----------------------------------------------- --------------------- |
1----| 2 || txtFld | btn1 || txt | mnOp | btn2 | txt | mnOp | btn3 | btn4 || ||
_ ||---------------------||-----------------------------------------------|| ||
| || || || ||
| || || || ||
1----| 3 || QListWiget || || ||
| || || QGidWidget || ||
| || || || ||
- ||-------------------- || || ||
1----| 4 || btn5 btn6 btn7 || || ||
- | -------------------- ------------------------------------------------ | ||
1----| 5 || txt txt || txt || txt || ||
- | -------------------- ------------------ -------- ------------------- | ||
| || iconbtn | iconbtn || btn || btn8 optionMenu textField || ||
| || | || ||----------------------------|| ||
| || | || || cb btn9 btn10 btn11 || ||
| || | || ||----------------------------|| ||
| || | || || txt txtFld || ||
1----| 6 || | || || txt txtFld || ||
| || | || ||----------------------------|| ||
| || | || || txt txt txt txt || ||
| || | || || txtFld txtFld txtFld mnOp || ||
| || | || || btn13 || ||
| || | || btn12 || btn14 btn15 btn16 btn17 || ||
- | --------------------- ----------------- ---------------------------- --------------------- |
1----| 7 | btn18 btn19 btn20 || |
- -----------------------------------------------------------------------------------------------
|| | || || || ||
btn| 1 2 3 4 5 6 7 8 9 10 11 12 13 14
| Save |Create|Create|Refresh|Import|Refresh|Remove|Create| X | Y | Z |autoColor|AddA|leftHand|
| 15 16 17 18 19 20
|leftFoot|RightHand|RightFoot|Group|Cen.Piv|Frez.T|
[x, y, height, width] = [X, Y, H, W]
"""
class ToolBoxII(QtWidgets.QWidget):
    """
    The DAMGtoolBoxII is a dialog that lets us save and import controllers,
    also have functions to help user getting easier to modify or plt_model nurbs.
    """
    # --------------------------------------------------------------------------------------------------------
    # DICTIONARY TO STORE BINDATA TO MAKE CONTROLLERS SHOW IN DAMG CONTROLLER LIBRARY SECTION
    # --------------------------------------------------------------------------------------------------------
    # 2D nurbs types
    # Maps display name -> icon file name (resolved relative to scrIcons below).
    nurbsType2D = {
        'Arrow Curve': 'arrowCurve.icon.png', 'Plus Nurbs 2': 'boldPlusNurbs.icon.png',
        'Clock Arrow Up': 'clockArrowUp.icon.png', 'Clock Arrow Down': 'clockArrowDown.icon.png',
        'Female Symbol': 'femaleSymbol.icon.png', 'Male Symbol': 'maleSymbol.icon.png',
        'Two directions': 'twoDirections.icon.png', 'Style Arrow 2D': 'twoDstyleArrow.icon.png',
        'Lip Control': 'lipControl.icon.png', 'Upper Lip Control': 'upperLipControl.icon.png',
        'Eyes Control': 'eyeControl.icon.png', 'Circle Plus': 'circlePlus.icon.png',
        'Bold Circle 2D': 'twoDboldCircle.icon.png', 'Bear Foot Control': 'bearFootControl.icon.png',
        'Fist Curve': "fistCurve.icon.png", 'Hand Nurbs': 'handNurbs.icon.png',
        'Foot Control 1': "footControl1.icon.png", 'Foot Control 2': 'footControl2.icon.png',
        'Circle Arrow 2D': 'twoDcircleArrow.icon.png', 'Slider Control': "sliderControl.icon.png",
        'Master Control': 'masterControl.icon.png', 'Fan 5 Wings': 'fiveWingsFan.icon.png',
        'Move Control 2': "moveControl1.icon.png", 'Cross Control': "crossControl.icon.png",
        'Move Control 1': 'moveControl2.icon.png', 'Plus Nurbs 1': 'plusNurbs.icon.png'
    }
    # 3D nurbs types
    # Maps display name -> icon file name (resolved relative to scrIcons below).
    nurbsType3D = {
        'Crown Curve': 'crownCurve.icon.png', 'Cube Nurbs': 'cubeCurve.icon.png',
        'Cube Nurbs on base': "cubeOnBase.icon.png", 'Nail Arrow Up': 'nailArrowUp.icon.png',
        'Rotation Control 1': "rotationControl.icon.png", 'Nail Arrow Down': 'nailArrowDown.icon.png',
        'Diamond Control': "diamond.icon.png", 'Single Rotation': "singleRotateControl.icon.png",
        'Shere Control': "sphereControl.icon.png", 'Spike Cross Control': "spikeCrossControl.icon.png",
        'Pyramid': 'pyramid.icon.png', 'Four Sides Arrow': 'fourSidesArrow.icon.png',
        'Origin Control': 'orginControl.icon.png', 'Circle Arrow 3D': 'threeDcircleArrow.icon.png',
        'Arrow Both Sides': 'arrowBothSide.icon.png', 'Style Arrow 3D': 'threeDstyleArrow.icon.png',
        'Jaw Control': 'headJawControl.icon.png', 'Two Way Arrow': 'twoWayArrow.icon.png',
        'Locator Control': 'locatorControl.icon.png', 'Sphere Square': 'sphereSquare.icon.png',
        'Ear Control': 'earControl.icon.png', 'Half Sphere': 'halfSphere.icon.png',
        'Rotation Control 2': 'twoAxisRotation.icon.png', 'Fish Nail': 'fishNail.icon.png',
        'Cylinder Nurbs': 'cylinderCurve.icon.png', 'Point Mark': 'pointNote.icon.png',
        'Tongue Control': 'tongueControl.icon.png', 'Zig Zag Circle': 'zigZagCircle.icon.png'
    }
    # get the paths of plt.maya.icon folder
    # NOTE(review): os.getenv expects an environment-variable *name*;
    # __root__ is presumably a string defined elsewhere in this file — confirm.
    scrIcons = os.path.join(os.getenv(__root__), 'imgs', 'maya.icon')
    def __init__(self, dock=True):
        """Build the tool-box widget.

        Args:
            dock (bool): if True parent into a Maya dock via getDock();
                otherwise create a floating QDialog under the Maya main window.
        """
        if dock:
            parent = getDock()
        else:
            deleteDock()
            # remove any window left over from a previous run
            try:
                cmds.deleteUI('DAMGtoolBoxII')
            except:
                logger.debug('No previous UI exists')
            parent = QtWidgets.QDialog(parent=getMayaMainWindow())
            parent.setObjectName('DAMGtoolBoxII')
            parent.setWindowTitle('DAMG Tool Box II - Nurbs/Curver/Controller AIO')
            self.layout = QtWidgets.QVBoxLayout(parent)
        super(ToolBoxII, self).__init__(parent=parent)
        # the library variable points to an instance of our controller library
        self.library = ControllerLibrary()
        # every time we create a showLayout_new instance, we will automatically build our UI and populate it
        self.buildUI()
        self.populateLibrarySection()
| |
[
[217.3, 264.2, 249.4, 296.9, 208.4, 232.0, 288.0, 299.4],
[258.0, 218.9, 205.5, 279.4, 293.8, 260.4, 228.3, 224.0],
[210.6, 217.2, 241.7, 201.7, 215.0, 255.9, 241.0, 240.8],
[256.2, 305.1, 293.5, 253.9, 271.0, 248.8, 206.2, 305.7],
[275.0, 301.1, 284.7, 227.8, 252.3, 231.2, 214.9, 243.8],
],
[
[307.1, 206.8, 207.1, 260.3, 257.4, 310.1, 287.4, 242.8],
[291.5, 266.9, 302.8, 232.3, 283.1, 207.8, 249.3, 252.4],
[207.4, 222.4, 218.9, 266.7, 214.4, 227.9, 254.5, 310.7],
[232.6, 248.7, 257.5, 243.6, 261.9, 220.7, 294.0, 286.5],
[286.3, 262.3, 202.2, 279.2, 257.1, 230.2, 250.6, 225.3],
],
[
[299.3, 268.7, 296.3, 199.9, 254.3, 295.7, 275.3, 271.8],
[250.6, 226.6, 301.3, 207.4, 242.9, 273.1, 216.1, 252.0],
[275.8, 291.3, 270.6, 282.9, 250.5, 291.3, 260.6, 310.1],
[253.2, 221.3, 281.1, 283.0, 268.0, 263.9, 224.3, 284.0],
[236.5, 218.9, 229.2, 227.9, 226.2, 247.3, 298.1, 226.8],
],
[
[215.9, 289.9, 222.7, 270.5, 247.7, 200.7, 219.0, 252.4],
[202.8, 278.9, 259.1, 207.2, 299.8, 249.2, 259.8, 200.7],
[249.3, 205.9, 303.5, 304.2, 216.8, 308.1, 201.5, 241.9],
[256.9, 264.6, 227.4, 229.5, 294.2, 271.0, 254.5, 274.6],
[268.1, 199.3, 275.7, 289.0, 205.0, 218.2, 270.6, 280.4],
],
[
[290.2, 274.0, 281.7, 263.1, 202.1, 199.7, 228.1, 260.0],
[248.7, 305.0, 306.2, 255.3, 298.0, 254.6, 276.0, 249.4],
[217.2, 272.4, 278.8, 252.1, 236.4, 223.6, 201.8, 300.9],
[302.4, 305.0, 273.1, 261.9, 241.4, 285.0, 275.1, 210.2],
[242.1, 208.1, 258.0, 222.2, 244.7, 236.9, 216.0, 260.5],
],
[
[239.9, 220.7, 246.1, 209.0, 247.9, 247.4, 227.1, 291.7],
[205.5, 287.2, 305.5, 238.8, 291.1, 250.0, 202.0, 234.0],
[275.4, 210.0, 276.8, 287.3, 281.2, 279.6, 306.0, 228.3],
[301.9, 295.9, 298.4, 304.0, 227.9, 301.7, 296.2, 247.4],
[210.1, 212.0, 275.1, 271.8, 254.0, 274.8, 283.8, 286.6],
],
],
units="K",
dtype="f8",
)
f.set_data(data, axes=("domainaxis0", "domainaxis1", "domainaxis2"))
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"standard_name": "time", "units": "days since 1959-01-01"}
)
c.nc_set_variable("time")
data = Data(
[
349.5,
380.5,
410.5,
440.5,
471.0,
501.5,
532.0,
562.5,
593.5,
624.0,
654.5,
685.0,
715.5,
746.5,
776.0,
805.5,
836.0,
866.5,
897.0,
927.5,
958.5,
989.0,
1019.5,
1050.0,
1080.5,
1111.5,
1141.0,
1170.5,
1201.0,
1231.5,
1262.0,
1292.5,
1323.5,
1354.0,
1384.5,
1415.0,
],
units="days since 1959-01-01",
dtype="f8",
)
c.set_data(data)
b = Bounds()
b.nc_set_variable("bounds")
data = Data(
[
[334.0, 365.0],
[365.0, 396.0],
[396.0, 425.0],
[425.0, 456.0],
[456.0, 486.0],
[486.0, 517.0],
[517.0, 547.0],
[547.0, 578.0],
[578.0, 609.0],
[609.0, 639.0],
[639.0, 670.0],
[670.0, 700.0],
[700.0, 731.0],
[731.0, 762.0],
[762.0, 790.0],
[790.0, 821.0],
[821.0, 851.0],
[851.0, 882.0],
[882.0, 912.0],
[912.0, 943.0],
[943.0, 974.0],
[974.0, 1004.0],
[1004.0, 1035.0],
[1035.0, 1065.0],
[1065.0, 1096.0],
[1096.0, 1127.0],
[1127.0, 1155.0],
[1155.0, 1186.0],
[1186.0, 1216.0],
[1216.0, 1247.0],
[1247.0, 1277.0],
[1277.0, 1308.0],
[1308.0, 1339.0],
[1339.0, 1369.0],
[1369.0, 1400.0],
[1400.0, 1430.0],
],
units="days since 1959-01-01",
dtype="f8",
)
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis0",), key="dimensioncoordinate0", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"units": "degrees_north", "standard_name": "latitude"}
)
c.nc_set_variable("lat")
data = Data(
[-75.0, -45.0, 0.0, 45.0, 75.0], units="degrees_north", dtype="f8"
)
c.set_data(data)
b = Bounds()
b.nc_set_variable("lat_bnds")
data = Data(
[
[-90.0, -60.0],
[-60.0, -30.0],
[-30.0, 30.0],
[30.0, 60.0],
[60.0, 90.0],
],
units="degrees_north",
dtype="f8",
)
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis1",), key="dimensioncoordinate1", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"units": "degrees_east", "standard_name": "longitude"}
)
c.nc_set_variable("lon")
data = Data(
[22.5, 67.5, 112.5, 157.5, 202.5, 247.5, 292.5, 337.5],
units="degrees_east",
dtype="f8",
)
c.set_data(data)
b = Bounds()
b.nc_set_variable("lon_bnds")
data = Data(
[
[0.0, 45.0],
[45.0, 90.0],
[90.0, 135.0],
[135.0, 180.0],
[180.0, 225.0],
[225.0, 270.0],
[270.0, 315.0],
[315.0, 360.0],
],
units="degrees_east",
dtype="f8",
)
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis2",), key="dimensioncoordinate2", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties({"standard_name": "air_pressure", "units": "hPa"})
c.nc_set_variable("air_pressure")
data = Data([850.0], units="hPa", dtype="f8")
c.set_data(data)
f.set_construct(
c, axes=("domainaxis3",), key="dimensioncoordinate3", copy=False
)
# cell_method
c = CellMethod()
c.set_method("mean")
c.set_axes("area")
f.set_construct(c)
elif n == 5:
f = Field()
f.set_properties(
{
"Conventions": "CF-" + CF(),
"standard_name": "air_potential_temperature",
"units": "K",
}
)
f.nc_set_variable("air_potential_temperature")
f.nc_set_global_attributes({"Conventions": None})
# domain_axis
c = DomainAxis(size=118)
c.nc_set_dimension("time")
f.set_construct(c, key="domainaxis0")
# domain_axis
c = DomainAxis(size=5)
c.nc_set_dimension("lat")
f.set_construct(c, key="domainaxis1")
# domain_axis
c = DomainAxis(size=8)
c.nc_set_dimension("lon")
f.set_construct(c, key="domainaxis2")
# domain_axis
c = DomainAxis(size=1)
f.set_construct(c, key="domainaxis3")
# field data
data = Data(
[
[
[274.0, 282.2, 267.4, 275.3, 274.3, 280.0, 281.9, 266.7],
[263.9, 268.6, 276.9, 275.8, 271.1, 277.3, 270.2, 278.0],
[266.5, 267.9, 267.8, 268.2, 257.7, 263.5, 273.9, 281.0],
[274.9, 265.2, 284.2, 275.5, 254.3, 277.4, 258.1, 273.3],
[279.9, 291.9, 273.9, 285.8, 277.1, 271.3, 273.4, 261.8],
],
[
[292.2, 275.7, 286.9, 287.2, 269.2, 266.1, 286.4, 272.4],
[276.9, 269.2, 290.9, 283.1, 268.2, 274.0, 267.8, 253.2],
[285.5, 288.9, 261.7, 278.6, 287.6, 269.0, 277.2, 279.1],
[264.1, 256.4, 278.1, 272.1, 271.4, 277.2, 271.2, 274.5],
[277.5, 290.1, 282.4, 264.7, 277.8, 257.5, 261.3, 284.6],
],
[
[277.2, 279.8, 259.0, 267.0, 277.9, 282.0, 268.7, 277.7],
[280.2, 256.7, 264.0, 272.8, 284.2, 262.6, 292.9, 273.4],
[256.3, 276.7, 280.7, 258.6, 267.7, 260.7, 273.3, 273.7],
[278.9, 279.4, 276.5, 272.7, 271.3, 260.1, 287.4, 278.9],
[288.7, 278.6, 284.2, 277.1, 283.8, 283.5, 262.4, 268.1],
],
[
[266.4, 277.7, 279.8, 271.1, 257.1, 286.9, 277.9, 261.5],
[277.6, 273.5, 261.1, 280.8, 280.1, 266.0, 270.8, 256.0],
[281.0, 290.1, 263.1, 274.8, 288.2, 277.2, 278.8, 260.4],
[248.1, 285.8, 274.2, 268.1, 279.6, 278.1, 262.3, 286.0],
[274.3, 272.8, 276.4, 281.7, 258.1, 275.2, 259.4, 279.5],
],
[
[266.7, 259.6, 257.7, 265.6, 259.3, 256.6, 255.7, 285.6],
[283.4, 274.8, 268.7, 277.2, 265.2, 281.5, 282.5, 258.1],
[284.2, 291.0, 268.9, 260.0, 281.3, 266.9, 274.6, 289.2],
[279.4, 284.7, 266.6, 285.6, 275.1, 284.8, 286.4, 284.3],
[269.1, 273.3, 272.8, 279.9, 283.2, 285.5, 258.1, 261.7],
],
[
[296.4, 281.1, 278.6, 273.2, 288.7, 281.3, 265.6, 284.2],
[276.8, 271.7, 274.4, 271.1, 279.9, 265.4, 292.5, 259.7],
[278.7, 279.6, 277.0, 270.7, 266.1, 265.2, 272.5, 278.1],
[284.0, 254.0, 279.7, 291.1, 282.4, 279.3, 257.6, 285.1],
[272.2, 283.6, 270.0, 271.9, 294.8, 260.4, 275.9, 275.5],
],
[
[273.5, 273.9, 298.2, 275.1, 268.2, 260.2, 260.5, 272.5],
[264.7, 241.2, 261.1, 260.3, 272.8, 285.2, 283.7, 275.5],
[273.2, 256.0, 282.9, 272.0, 253.9, 291.0, 267.5, 272.1],
[259.6, 262.7, 278.5, 271.6, 260.1, 273.3, 286.1, 267.7],
[266.3, 262.5, 273.4, 278.9, 274.9, 267.1, 274.6, 286.1],
],
[
[278.1, 269.1, 271.4, 266.1, 258.6, 281.9, 256.9, 281.7],
[275.0, 281.3, 256.7, 252.4, 281.5, 273.6, 259.1, 266.1],
[264.1, 266.0, 278.9, 267.4, 286.4, 281.7, 270.2, 266.1],
[274.2, 261.9, 270.1, 291.9, 292.1, 277.6, 283.6, 279.4],
[281.4, 270.6, 255.5, 269.4, 264.8, 262.4, 275.3, 286.9],
],
[
[269.6, 269.8, 270.8, 270.1, 277.6, 271.0, 263.6, 274.5],
[259.0, 251.2, 295.4, 262.1, 262.6, 283.3, 269.0, 268.1],
[286.0, 275.6, 264.2, 265.3, 263.7, 268.6, 269.9, 281.2],
[269.6, 274.9, 256.7, 284.1, 271.1, 254.8, 258.1, 273.0],
[269.2, 271.2, 275.3, 279.8, 278.7, 278.8, 280.5, 285.8],
],
[
[285.9, 246.7, 255.7, 282.1, 258.9, 262.6, 300.0, 288.3],
[279.2, 266.6, 271.0, 258.5, 257.9, 274.8, 264.4, 267.4],
[282.0, 266.8, 280.1, 289.6, 278.2, 282.2, 288.7, 273.6],
[279.2, 273.1, 266.7, 271.5, 275.4, 278.2, 269.7, 291.6],
[281.1, 282.1, 271.3, 260.0, 275.7, 291.4, 266.6, 265.2],
],
[
[278.5, 263.3, 285.9, 255.2, 283.8, 288.7, 282.8, 262.9],
[272.6, 288.2, 257.5, 269.8, 273.6, 266.7, 276.2, 275.5],
[261.8, 274.2, 278.3, 273.3, 268.8, 278.5, 273.0, 276.7],
[276.2, 271.2, 284.8, 272.2, 274.7, 253.6, 268.7, 273.1],
[285.6, 270.2, 271.4, 285.5, 248.1, 273.1, 294.9, 272.9],
],
[
[269.3, 252.5, 271.3, 268.2, 270.8, 282.2, 275.0, 274.2],
[257.5, 295.6, 278.0, 284.6, 277.3, 277.3, 273.1, 278.8],
[279.8, 286.5, 259.5, 287.2, 276.8, 282.0, 265.9, 283.8],
[276.2, 282.4, 252.7, 265.5, 252.9, 274.6, 265.5, 274.1],
[269.8, 267.4, 292.5, 256.7, 274.3, 278.9, 270.3, 252.5],
],
[
[287.4, 275.6, 287.9, 284.6, 281.7, 280.5, 267.8, 283.9],
[292.0, 286.4, 276.1, 277.8, 280.8, 268.4, 281.6, 262.4],
[260.7, 265.9, 274.9, 275.9, 277.3, 286.2, 296.4, 280.1],
[251.1, 283.6, 265.0, 280.6, 254.8, 251.6, 275.2, 279.2],
[273.3, 272.0, 254.3, 287.5, 275.3, 282.1, 272.6, 266.8],
],
[
[279.6, 268.3, 280.6, 267.5, 260.8, 268.2, 276.2, 247.4],
[268.1, 275.0, 278.7, 265.8, 283.8, 271.6, 284.5, 276.6],
[269.7, 270.1, 274.9, 252.2, 285.5, 254.3, 266.2, 270.6],
[274.1, 273.7, 269.4, 262.9, 281.7, 282.7, 270.0, 264.8],
[280.7, 265.3, 291.6, 281.2, 273.1, 273.5, 291.3, 274.4],
],
[
[270.4, 273.8, 260.8, 262.9, 268.9, 278.1, 261.7, 257.3],
[262.4, 261.2, 265.5, 276.6, 264.4, 271.6, 272.9, 273.3],
[247.1, 271.7, 272.0, 279.3, 269.3, 255.2, 279.9, 272.8],
[291.6, 279.5, 263.2, 285.0, 263.5, 257.0, 274.2, 270.1],
[261.5, 270.7, 280.2, 264.5, 267.0, 260.3, 277.4, 288.1],
],
[
[261.5, 285.4, 275.3, 276.7, 279.4, 269.1, 264.1, 254.2],
[262.1, 272.5, 262.2, 275.6, 276.1, 269.9, 263.3, 281.2],
[287.1, 276.5, 285.9, 267.1, 274.2, 269.0, 265.3, 281.2],
[265.7, 278.5, 251.2, | |
np.mean(beta):
azim = -60
unit = 'Nm'
scale = 1
if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
scale = 1e-3
unit = 'kNm'
if title:
_plot_surface(ax, i1, beta, scale*np.asarray(torque),
(u'I1/A', u'Beta/°', title),
azim=azim)
else:
_plot_surface(ax, i1, beta, scale*np.asarray(torque),
(u'I1/A', u'Beta/°', u'Torque/{}'.format(unit)),
azim=azim)
def i1beta_ld(i1, beta, ld, ax=0):
    """Surface plot of the d-inductance Ld (mH) over (i1, beta)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = np.asarray(ld) * 1e3
    labels = (u'I1/A', u'Beta/°', u'Ld/mH')
    _plot_surface(ax, i1, beta, ld_mh, labels, azim=60)
def i1beta_lq(i1, beta, lq, ax=0):
    """creates a surface plot of lq vs i1, beta"""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    azim = 60
    # flip the view for positive mean beta or mean beta below -90°
    if 0 < np.mean(beta) or -90 > np.mean(beta):
        azim = -120
    _plot_surface(ax, i1, beta, np.asarray(lq)*1e3,
                  (u'I1/A', u'Beta/°', u'Lq/mH'),
                  azim=azim)
def i1beta_psim(i1, beta, psim, ax=0):
    """Surface plot of the magnet flux linkage Psi_m over (i1, beta)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'I1/A', u'Beta/°', u'Psi m/Vs')
    _plot_surface(ax, i1, beta, psim, labels, azim=60)
def i1beta_up(i1, beta, up, ax=0):
    """Surface plot of the induced voltage Up over (i1, beta)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'I1/A', u'Beta/°', u'Up/V')
    _plot_surface(ax, i1, beta, up, labels, azim=60)
def i1beta_psid(i1, beta, psid, ax=0):
    """Surface plot of the d-flux linkage Psi_d over (i1, beta)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    # flip the view for positive mean beta or mean beta below -90°
    mean_beta = np.mean(beta)
    azim = 60 if (mean_beta > 0 or mean_beta < -90) else -60
    _plot_surface(ax, i1, beta, psid,
                  (u'I1/A', u'Beta/°', u'Psi d/Vs'), azim=azim)
def i1beta_psiq(i1, beta, psiq, ax=0):
    """Surface plot of the q-flux linkage Psi_q over (i1, beta)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    # flip the view for positive mean beta or mean beta below -90°
    mean_beta = np.mean(beta)
    azim = -60 if (mean_beta > 0 or mean_beta < -90) else 210
    _plot_surface(ax, i1, beta, psiq,
                  (u'I1/A', u'Beta/°', u'Psi q/Vs'), azim=azim)
def idq_torque(id, iq, torque, ax=0):
    """Surface plot of torque over (id, iq); returns the axis used."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    # switch to kNm when values exceed +/- 9.9 kNm
    scale, unit = 1, 'Nm'
    if np.max(torque) > 9.9e3 or np.min(torque) < -9.9e3:
        scale, unit = 1e-3, 'kNm'
    labels = (u'Id/A', u'Iq/A', u'Torque/{}'.format(unit))
    _plot_surface(ax, id, iq, scale * np.asarray(torque), labels, azim=-60)
    return ax
def idq_psid(id, iq, psid, ax=0):
    """Surface plot of the d-flux linkage Psi_d over (id, iq)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi d/Vs')
    _plot_surface(ax, id, iq, psid, labels, azim=210)
def idq_psiq(id, iq, psiq, ax=0):
    """Surface plot of the q-flux linkage Psi_q over (id, iq)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi q/Vs')
    _plot_surface(ax, id, iq, psiq, labels, azim=210)
def idq_psim(id, iq, psim, ax=0):
    """Surface plot of the magnet flux linkage Psi_m over (id, iq)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    labels = (u'Id/A', u'Iq/A', u'Psi m [Vs]')
    _plot_surface(ax, id, iq, psim, labels, azim=120)
def idq_ld(id, iq, ld, ax=0):
    """Surface plot of the d-inductance Ld (mH) over (id, iq)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = np.asarray(ld) * 1e3
    labels = (u'Id/A', u'Iq/A', u'L d/mH')
    _plot_surface(ax, id, iq, ld_mh, labels, azim=120)
def idq_lq(id, iq, lq, ax=0):
    """Surface plot of the q-inductance Lq (mH) over (id, iq)."""
    if not ax:
        _create_3d_axis()
        ax = plt.gca()
    lq_mh = np.asarray(lq) * 1e3
    labels = (u'Id/A', u'Iq/A', u'L q/mH')
    _plot_surface(ax, id, iq, lq_mh, labels, azim=120)
def ldlq(bch):
    """creates the surface plots of a BCH reader object
    with a ld-lq identification

    Args:
        bch: BCH reader object carrying an ``ldq`` result dict
    """
    beta = bch.ldq['beta']
    i1 = bch.ldq['i1']
    torque = bch.ldq['torque']
    ld = np.array(bch.ldq['ld'])
    lq = np.array(bch.ldq['lq'])
    psid = bch.ldq['psid']
    psiq = bch.ldq['psiq']
    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Ld-Lq Identification {}'.format(bch.filename), fontsize=16)
    fig.add_subplot(rows, 2, 1, projection='3d')
    i1beta_torque(i1, beta, torque)
    fig.add_subplot(rows, 2, 2, projection='3d')
    i1beta_psid(i1, beta, psid)
    fig.add_subplot(rows, 2, 3, projection='3d')
    i1beta_psiq(i1, beta, psiq)
    fig.add_subplot(rows, 2, 4, projection='3d')
    # psim is not present in every BCH result; fall back to the induced
    # voltage up. Catch KeyError only — a bare except also hid real errors.
    try:
        i1beta_psim(i1, beta, bch.ldq['psim'])
    except KeyError:
        i1beta_up(i1, beta, bch.ldq['up'])
    fig.add_subplot(rows, 2, 5, projection='3d')
    i1beta_ld(i1, beta, ld)
    fig.add_subplot(rows, 2, 6, projection='3d')
    i1beta_lq(i1, beta, lq)
def psidq(bch):
    """Create the surface plots for a BCH reader object
    with a psid-psiq identification."""
    id = bch.psidq['id']
    iq = bch.psidq['iq']
    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Psid-Psiq Identification {}'.format(
        bch.filename), fontsize=16)
    # (plot function, values) in subplot order
    panels = [
        (idq_torque, bch.psidq['torque']),
        (idq_psid, bch.psidq['psid']),
        (idq_psiq, bch.psidq['psiq']),
        (idq_psim, bch.psidq_ldq['psim']),
        (idq_ld, np.array(bch.psidq_ldq['ld'])),
        (idq_lq, np.array(bch.psidq_ldq['lq'])),
    ]
    for num, (plot, values) in enumerate(panels, start=1):
        fig.add_subplot(rows, 2, num, projection='3d')
        plot(id, iq, values)
def felosses(losses, coeffs, title='', log=True, ax=0):
    """plot iron losses with steinmetz or jordan approximation

    Args:
        losses: dict with f, B, pfe values (plus fo, Bo reference values)
        coeffs: list with steinmetz (cw, alpha, beta) or
                jordan (cw, alpha, ch, beta, gamma) coeffs
        title: title string
        log: log scale for x and y axes if True
        ax: axis to draw into (current axis if 0)
    """
    import femagtools.losscoeffs as lc
    if ax == 0:
        ax = plt.gca()
    fo = losses['fo']
    Bo = losses['Bo']
    # BUGFIX: use numpy directly; the plt.np alias was removed from matplotlib
    B = np.linspace(0.9*np.min(losses['B']),
                    1.1*0.9*np.max(losses['B']))
    for i, f in enumerate(losses['f']):
        # drop empty/zero entries at the end of each pfe row
        pfe = [p for p in np.array(losses['pfe'])[i] if p]
        if f > 0:
            if len(coeffs) == 5:
                ax.plot(B, lc.pfe_jordan(f, B, *coeffs, fo=fo, Bo=Bo))
            elif len(coeffs) == 3:
                ax.plot(B, lc.pfe_steinmetz(f, B, *coeffs, fo=fo, Bo=Bo))
        plt.plot(losses['B'][:len(pfe)], pfe,
                 marker='o', label="{} Hz".format(f))
    ax.set_title("Fe Losses/(W/kg) " + title)
    if log:
        ax.set_yscale('log')
        ax.set_xscale('log')
    ax.set_xlabel("Flux Density [T]")
    # plt.ylabel("Pfe [W/kg]")
    ax.legend()
    ax.grid(True)
def spel(isa, with_axis=False, ax=0):
    """Plot the super elements of an I7/ISA7 model.

    Args:
        isa: Isa7 object
        with_axis: keep the axis frame visible if True
    """
    from matplotlib.patches import Polygon
    if not ax:
        ax = plt.gca()
    ax.set_aspect('equal')
    for se in isa.superelements:
        outline = [n.xy for nc in se.nodechains for n in nc.nodes]
        ax.add_patch(Polygon(outline, color=isa.color[se.color], lw=0))
    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')
def mesh(isa, with_axis=False, ax=0):
    """Plot the element mesh of an I7/ISA7 model.

    Args:
        isa: Isa7 object
        with_axis: keep the axis frame visible if True
    """
    from matplotlib.lines import Line2D
    if not ax:
        ax = plt.gca()
    ax.set_aspect('equal')
    for el in isa.elements:
        xs, ys = (list(coord) for coord in zip(*[v.xy for v in el.vertices]))
        ax.add_line(Line2D(xs, ys, color='b', ls='-', lw=0.25))
    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')
def _contour(ax, title, elements, values, label='', isa=None):
    """Draw a filled contour of per-element values as colored polygons.

    Args:
        ax: axis to draw into (current axis if 0)
        title: plot title
        elements: iterable of mesh elements with .vertices (each vertex has .xy)
        values: one value per element; NaN entries are drawn as white patches
        label: colorbar label (omitted when empty)
        isa: optional Isa7 model; its superelements are drawn as a gray backdrop
    """
    from matplotlib.patches import Polygon
    from matplotlib.collections import PatchCollection
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    ax.set_title(title, fontsize=18)
    if isa:
        # faint outline of all superelements behind the contour
        for se in isa.superelements:
            ax.add_patch(Polygon([n.xy
                                  for nc in se.nodechains
                                  for n in nc.nodes],
                                 color='gray', alpha=0.1, lw=0))
    # only elements with a finite value go into the colored collection
    valid_values = np.logical_not(np.isnan(values))
    patches = np.array([Polygon([v.xy for v in e.vertices])
                        for e in elements])[valid_values]
    # , cmap=matplotlib.cm.jet, alpha=0.4)
    p = PatchCollection(patches, alpha=1.0, match_original=False)
    p.set_array(np.asarray(values)[valid_values])
    ax.add_collection(p)
    cb = plt.colorbar(p)
    # NaN-valued elements are overlaid as plain white patches
    for patch in np.array([Polygon([v.xy for v in e.vertices],
                                   fc='white', alpha=1.0)
                           for e in elements])[np.isnan(values)]:
        ax.add_patch(patch)
    if label:
        cb.set_label(label=label, fontsize=18)
    ax.autoscale(enable=True)
    ax.axis('off')
def demag(isa, ax=0):
    """Plot the demagnetization field of an NC/I7/ISA7 model.

    Args:
        isa: Isa7/NC object
    """
    magnet_elements = [e for e in isa.elements if e.is_magnet()]
    hdemag = np.array([e.demagnetization(isa.MAGN_TEMPERATURE)
                       for e in magnet_elements])
    _contour(ax, f'Demagnetization at {isa.MAGN_TEMPERATURE} °C',
             magnet_elements, hdemag, '-H / kA/m', isa)
    logger.info("Max demagnetization %f", np.max(hdemag))
def demag_pos(isa, pos, icur=-1, ibeta=-1, ax=0):
    """plot demag of NC/I7/ISA7 model at rotor position
    Args:
        isa: Isa7/NC object
        pos: rotor position in degree
        icur: cur amplitude index or last index if -1
        ibeta: beta angle index or last index if -1
    """
    emag = [e for e in isa.elements if e.is_magnet()]
    demag = np.array([isa.demagnetization(e, icur, ibeta)[1]
                      for e in emag])
    # find the first sampled position at or beyond the requested angle;
    # i (and x) keep their last loop values after the break and are used below
    for i, x in enumerate(isa.pos_el_fe_induction):
        if x >= pos/180*np.pi:
            break
    hpol = demag[:, i]
    # zero field means "no data" for these elements; blank them out as NaN
    hpol[hpol == 0] = np.nan
    _contour(ax, f'Demagnetization at Pos. {round(x/np.pi*180)}° ({isa.MAGN_TEMPERATURE} °C)',
             emag, hpol, '-H / kA/m', isa)
    logger.info("Max demagnetization %f kA/m", np.nanmax(hpol))
def flux_density(isa, subreg=[], ax=0):
    """plot flux density of NC/I7/ISA7 model

    Args:
        isa: Isa7/NC object
        subreg: subregion name or list of names to restrict the plot to
    """
    # NOTE: the mutable default [] is only read, never mutated, so it is
    # safe here; None would be the more defensive idiom.
    if subreg:
        sr = subreg if isinstance(subreg, list) else [subreg]
        elements = [e for s in sr for se in isa.get_subregion(s).elements()
                    for e in se]
    else:
        elements = list(isa.elements)
    fluxd = np.array([np.linalg.norm(e.flux_density()) for e in elements])
    # plain string: the original f-prefix had no placeholders
    _contour(ax, 'Flux Density T', elements, fluxd)
    logger.info("Max flux dens %f", np.max(fluxd))
def loss_density(isa, subreg=[], ax=0):
    """Plot the loss density (kW/m³) of an NC/I7/ISA7 model.

    Args:
        isa: Isa7/NC object
        subreg: subregion name or list of names to restrict the plot to
    """
    if subreg:
        names = subreg if isinstance(subreg, list) else [subreg]
        elements = [e for s in names
                    for sre in isa.get_subregion(s).elements()
                    for e in sre]
    else:
        elements = [e for e in isa.elements]
    lossd = np.array([e.loss_density * 1e-3 for e in elements])
    _contour(ax, 'Loss Density kW/m³', elements, lossd)
def mmf(f, title='', ax=0):
| |
"""Controller requesting state from datapath."""
# System imports
from enum import Enum
# Local source tree imports
from pyof.foundation.base import GenericMessage, GenericStruct, IntEnum
from pyof.foundation.basic_types import (
BinaryData, FixedTypeList, Pad, UBInt8, UBInt16, UBInt32, UBInt64)
from pyof.v0x05.common.flow_match import Match
from pyof.v0x05.common.header import Header, Type
from pyof.v0x05.common.port import PortNo
from pyof.v0x05.controller2switch.common import (
ExperimenterMultipartHeader, MultipartType, TableFeatures)
from pyof.v0x05.controller2switch.group_mod import Group
from pyof.v0x05.controller2switch.meter_mod import Meter
from pyof.v0x05.controller2switch.modify_flow_table_message import Table
# Third-party imports
# Public API of this module.
__all__ = ('MultipartRequest', 'MultipartRequestFlags', 'FlowMonitorCommand', 'FlowMonitorFlags',
           'AggregateStatsRequest', 'FlowStatsRequest', 'FlowMonitorRequest',
           'PortStatsRequest', 'QueueStatsRequest',
           'GroupStatsRequest', 'MeterMultipartRequest')
# Enum
class MultipartRequestFlags(IntEnum):
    """Flags for MultipartRequest (OFPMPF_REQ_*)."""
    #: No more requests to follow (This is not part of spec). Thanks @jondef95
    OFPMPF_REQ_NONE = 0
    #: More requests to follow
    OFPMPF_REQ_MORE = 1 << 0
class FlowMonitorCommand(IntEnum):
    """Flow monitor commands (OFPFMC_*)."""
    #: New flow monitor
    OFPFMC_ADD = 0
    #: Modify existing flow monitor
    OFPFMC_MODIFY = 1
    #: Delete / cancel existing flow monitor
    OFPFMC_DELETE = 2
class FlowMonitorFlags(IntEnum):
    """'flags' bits in struct of_flow_monitor_request (OFPFMF_*)."""
    #: Initially matching flows
    OFPFMF_INITIAL = 1 << 0
    #: New matching flows as they are added
    OFPFMF_ADD = 1 << 1
    #: Old matching flows as they are removed
    OFPFMF_REMOVED = 1 << 2
    #: Matching flows as they are changed What to include in updates
    OFPFMF_MODIFY = 1 << 3
    #: If set, instructions are included
    OFPFMF_INSTRUCTIONS = 1 << 4
    #: If set, include own changes in full
    OFPFMF_NO_ABBREV = 1 << 5
    #: If set, don't include other controllers
    OFPFMF_ONLY_OWN = 1 << 6
# Classes
class MultipartRequest(GenericMessage):
    """Request datapath state.

    While the system is running, the controller may request state from the
    datapath using the OFPT_MULTIPART_REQUEST message.
    """

    #: Openflow :class:`~pyof.v0x05.common.header.Header`
    header = Header(message_type=Type.OFPT_MULTIPART_REQUEST)
    #: One of the OFPMP_* constants.
    multipart_type = UBInt16(enum_ref=MultipartType)
    #: OFPMPF_REQ_* flags.
    flags = UBInt16(enum_ref=MultipartRequestFlags)
    #: Padding
    pad = Pad(4)
    #: Body of the request
    body = BinaryData()

    def __init__(self, xid=None, multipart_type=None, flags=0, body=b''):
        """Create a MultipartRequest with the optional parameters below.

        Args:
            xid (int): xid to the header.
            multipart_type (int): One of the OFPMP_* constants.
            flags (int): OFPMPF_REQ_* flags.
            body (bytes): Body of the request.
        """
        super().__init__(xid)
        self.multipart_type = multipart_type
        self.flags = flags
        self.body = body

    def pack(self, value=None):
        """Pack a MultipartRequest using the object's attributes.

        This method will pack the attribute body and multipart_type before
        packing the MultipartRequest object, then will return this struct as
        binary data.

        Args:
            value (:class:`~MultipartRequest`): Object to be packed.

        Returns:
            bytes: Binary data with MultipartRequest packed.
        """
        # Save the unpacked body so it can be restored after packing.
        buff = self.body
        if not value:
            value = self.body
        if value:
            if isinstance(value, (list, FixedTypeList)):
                # Wrap list bodies in the list type chosen for this
                # multipart_type before packing.
                obj = self._get_body_instance()
                obj.extend(value)
            elif hasattr(value, 'pack'):
                obj = value
            # NOTE(review): if `value` is truthy but neither a list nor an
            # object with a pack() method (e.g. raw bytes), `obj` is never
            # bound and the next line raises NameError — confirm whether
            # raw-bytes bodies are expected to reach this path.
            self.body = obj.pack()
        multipart_packed = super().pack()
        # Restore the original (unpacked) body attribute.
        self.body = buff
        return multipart_packed

    def unpack(self, buff, offset=0):
        """Unpack a binary message into this object's attributes.

        Unpack the binary value *buff* and update this object attributes based
        on the results. It is an inplace method and it receives the binary data
        of the message **without the header**.

        This class' unpack method is like the :meth:`.GenericMessage.unpack`
        one, except for the ``body`` attribute which has its type determined
        by the ``multipart_type`` attribute.

        Args:
            buff (bytes): Binary data package to be unpacked, without the
                header.
            offset (int): Position at which to start unpacking.
        """
        super().unpack(buff[offset:])
        self._unpack_body()

    def _unpack_body(self):
        """Unpack `body` and replace it by the result."""
        obj = self._get_body_instance()
        obj.unpack(self.body.value)
        self.body = obj

    def _get_body_instance(self):
        """Return an empty body instance appropriate for ``multipart_type``."""
        # Types whose body is a single struct.
        simple_body = {
            MultipartType.OFPMP_FLOW: FlowStatsRequest,
            MultipartType.OFPMP_AGGREGATE: AggregateStatsRequest,
            MultipartType.OFPMP_PORT_STATS: PortStatsRequest,
            MultipartType.OFPMP_QUEUE: QueueStatsRequest,
            MultipartType.OFPMP_GROUP: GroupStatsRequest,
            MultipartType.OFPMP_METER: MeterMultipartRequest,
            MultipartType.OFPMP_EXPERIMENTER: ExperimenterMultipartHeader
        }
        # Types whose body is an array of structs.
        array_of_bodies = {MultipartType.OFPMP_TABLE_FEATURES: TableFeatures}
        # Normalize multipart_type from a raw UBInt16 to its enum member.
        if isinstance(self.multipart_type, UBInt16):
            self.multipart_type = self.multipart_type.enum_ref(
                self.multipart_type.value)
        pyof_class = simple_body.get(self.multipart_type, None)
        if pyof_class:
            return pyof_class()
        array_of_class = array_of_bodies.get(self.multipart_type, None)
        if array_of_class:
            return FixedTypeList(pyof_class=array_of_class)
        # Unknown multipart types fall back to opaque binary data.
        return BinaryData(b'')
class AggregateStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_AGGREGATE."""

    #: ID of the table to read (from ofp_table_stats); OFPTT_ALL selects
    #: every table.
    table_id = UBInt8()
    #: Align to 32 bits.
    pad = Pad(3)
    #: Matching entries must include this as an output port; OFPP_ANY means
    #: no restriction.
    out_port = UBInt32()
    #: Matching entries must include this as an output group; OFPG_ANY means
    #: no restriction.
    out_group = UBInt32()
    #: Align to 64 bits.
    pad2 = Pad(4)
    #: Matching entries must contain this cookie value.
    cookie = UBInt64()
    #: Mask restricting which cookie bits must match; 0 means no restriction.
    cookie_mask = UBInt64()
    #: Fields to match. Variable size.
    match = Match()

    def __init__(self, table_id=Table.OFPTT_ALL, out_port=PortNo.OFPP_ANY,
                 out_group=Group.OFPG_ANY, cookie=0, cookie_mask=0,
                 match=None):
        """Build an AggregateStatsRequest from the optional parameters below.

        Args:
            table_id (int): ID of table to read (from ofp_table_stats);
                OFPTT_ALL for all tables.
            out_port (int): Require matching entries to include this as an
                output port; OFPP_ANY means no restriction.
            out_group (int): Require matching entries to include this as an
                output group; OFPG_ANY means no restriction.
            cookie (int): Require matching entries to contain this cookie
                value.
            cookie_mask (int): Mask used to restrict the cookie bits that
                must match; 0 means no restriction.
            match (~pyof.v0x05.common.flow_match.Match): Fields to match.
                Variable size.
        """
        super().__init__()
        self.table_id = table_id
        self.out_port = out_port
        self.out_group = out_group
        self.cookie = cookie
        self.cookie_mask = cookie_mask
        # Default to an empty Match when the caller supplies none.
        self.match = match if match is not None else Match()
class FlowStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_FLOW."""

    #: ID of the table to read.
    table_id = UBInt8()
    #: Align to 32 bits.
    pad = Pad(3)
    #: Output-port restriction for matching entries.
    out_port = UBInt32()
    #: Output-group restriction for matching entries.
    out_group = UBInt32()
    #: Align to 64 bits.
    pad2 = Pad(4)
    #: Cookie value matching entries must contain.
    cookie = UBInt64()
    #: Mask restricting which cookie bits must match.
    cookie_mask = UBInt64()
    #: Fields to match. Variable size.
    match = Match()

    def __init__(self, table_id=Table.OFPTT_ALL, out_port=PortNo.OFPP_ANY,
                 out_group=Group.OFPG_ANY, cookie=0, cookie_mask=0,
                 match=None):
        """Build a FlowStatsRequest from the optional parameters below.

        Args:
            table_id (int): ID of table to read (from pyof_table_stats);
                0xff for all tables or 0xfe for emergency.
            out_port (:class:`int`, :class:`~pyof.v0x05.common.port.PortNo`):
                Require matching entries to include this as an output port.
                :attr:`.PortNo.OFPP_ANY` means no restriction.
            out_group: Require matching entries to include this as an output
                group. :attr:`Group.OFPG_ANY` means no restriction.
            cookie: Require matching entries to contain this cookie value.
            cookie_mask: Mask used to restrict the cookie bits that must
                match; 0 means no restriction.
            match (~pyof.v0x05.common.flow_match.Match): Fields to match.
        """
        super().__init__()
        self.table_id = table_id
        self.out_port = out_port
        self.out_group = out_group
        self.cookie = cookie
        self.cookie_mask = cookie_mask
        # Default to an empty Match when the caller supplies none.
        self.match = match if match is not None else Match()
class PortStatsRequest(GenericStruct):
    """Body for ofp_stats_request of type OFPST_PORT."""

    #: Port to query; OFPP_ANY requests statistics for all ports.
    port_no = UBInt32()
    #: Align to 64-bits.
    pad = Pad(4)

    def __init__(self, port_no=PortNo.OFPP_ANY):
        """Create a PortStatsRequest with the optional parameters below.

        Args:
            port_no (:class:`int`, :class:`~pyof.v0x05.common.port.PortNo`):
                :attr:`StatsType.OFPST_PORT` message must request statistics
                either for a single port (specified in ``port_no``) or for all
                ports (if ``port_no`` == :attr:`.PortNo.OFPP_ANY`).
        """
        super().__init__()
        self.port_no = port_no
class QueueStatsRequest(GenericStruct):
    """Implements the request body of a ``port_no``."""

    #: Port to query; see ``__init__`` for the "all ports" value.
    port_no = UBInt32()
    #: Queue to query; see ``__init__`` for the "all queues" value.
    queue_id = UBInt32()

    def __init__(self, port_no=PortNo.OFPP_ANY, queue_id=0xffffffff):
        """Create a QueueStatsRequest with the optional parameters below.

        Args:
            port_no (:class:`int`, :class:`~pyof.v0x05.common.port.Port`):
                All ports if :attr:`.Port.OFPP_ALL`.
            queue_id (int): All queues if OFPQ_ALL (``0xffffffff``).
        """
        super().__init__()
        self.port_no = port_no
        self.queue_id = queue_id
class GroupStatsRequest(GenericStruct):
    """Body of OFPMP_GROUP request."""

    #: Group id. All groups is OFPG_ALL.
    group_id = UBInt32()
    #: Align to 64 bits.
    pad = Pad(4)

    def __init__(self, group_id=Group.OFPG_ALL):
        """Create a GroupStatsRequest with the optional parameters below.

        Args:
            group_id (int): ID of group to read. OFPG_ALL to request
                information for all groups.
        """
        super().__init__()
        self.group_id = group_id
class MeterMultipartRequest(GenericStruct):
    """MeterMultipartRequest structure.

    This class represents the structure for ofp_meter_multipart_request.
    This structure is a body of OFPMP_METER and OFPMP_METER_CONFIG requests.
    """

    #: Meter instance, or OFPM_ALL.
    meter_id = UBInt32()
    #: Align to 64 bits.
    pad = Pad(4)

    def __init__(self, meter_id=Meter.OFPM_ALL):
        """Create a MeterMultipartRequest with the optional parameters below.

        Args:
            meter_id (Meter): Meter identifier. The value Meter.OFPM_ALL
                refers to all Meters on the switch.
        """
        super().__init__()
        self.meter_id = meter_id
class FlowMonitorRequest(GenericStruct):
"""
Body for ofp_multipart_request of type OFPMP_FLOW_MONITOR.
The OFPMP_FLOW_MONITOR request’s body consists of an array of zero or more
instances of this structure. The request arranges to monitor the flows
that match the specified criteria, which are interpreted in the same way as
for OFPMP_FLOW.
’id’ identifies a particular monitor for the purpose of allowing it to be
canceled later with OFPFMC_DELETE. ’id’ must be unique | |
"112314": ("Neck Component", [7307, 7308]),
},
"NecroticLipidicPlaque": {
"122395": ("Necrotic-Lipidic Plaque", [3491, 3495, 3497]),
},
"NeedleGauge": {
"111465": ("Needle Gauge", [6095]),
},
"NeedleInTarget": {
"111438": ("Needle in target", []),
},
"NeedleLength": {
"111467": ("Needle Length", [6095]),
},
"NeedleLocalizationAndBiopsy": {
"111144": ("Needle localization and biopsy", [6028, 6029, 6051, 6058, 6061]),
},
"NegativeDCE": {
"130601": ("Negative DCE", [6335, 6345, 6346]),
},
"NegativeEnhancementIntegral": {
"113054": ("Negative enhancement integral", [218, 7180, 7469]),
},
"NegativeExponential": {
"130252": ("Negative exponential", [73]),
},
"NegativeInfinity": {
"114001": ("Negative Infinity", [42, 43]),
},
"NeighborhoodAnalysis": {
"123101": ("Neighborhood Analysis", [7162]),
},
"NeighbourhoodGreyToneDifferenceMatrix": {
"128779": ("Neighbourhood Grey Tone Difference Matrix", []),
},
"NeighbouringGreyLevelDependenceMatrix": {
"128780": ("Neighbouring Grey Level Dependence Matrix", []),
},
"NeoplasmOfTheMammarySkin": {
"111335": ("Neoplasm of the mammary skin", []),
},
"NetForwardVolume": {
"122645": ("Net Forward Volume", []),
},
"NetworkConfiguration": {
"110128": ("Network Configuration", [401, 403]),
},
"NetworkEntry": {
"110108": ("Network Entry", [400]),
},
"Neurofibromatosis": {
"111288": ("Neurofibromatosis", []),
},
"NeuroimagingSubjectMatter": {
"128733": ("Neuroimaging subject matter", [7017]),
},
"NeurologySpecialty": {
"128010": ("Neurology Specialty", [7449]),
},
"NeuroradiologicImagingSpecialty": {
"128011": ("Neuroradiologic Imaging Specialty", []),
},
"NeuroradiologyImagingSpecialty": {
"128011": ("Neuroradiology Imaging Specialty", [7449]),
},
"NeutralMusculoskeletalPosition": {
"109136": ("Neutral musculoskeletal position", [92]),
},
"NifeneF18": {
"126714": ("Nifene F^18^", [4021]),
},
"NilPerOsNPOStatusConfirmed": {
"122006": ("Nil Per Os (NPO) status confirmed", [3402]),
},
"NiobiumOrNiobiumCompound": {
"113710": ("Niobium or Niobium compound", []),
},
"NippleCharacteristic": {
"111297": ("Nipple Characteristic", []),
},
"NippleDischargeCytology": {
"111564": ("Nipple discharge cytology", [6083]),
},
"NippleInvolved": {
"111472": ("Nipple involved", []),
},
"NippleNotInProfile": {
"111205": ("Nipple not in profile", [6041]),
},
"NippleRing": {
"112177": ("Nipple ring", [6102, 6138, 6404, 7151, 7193]),
},
"NoAbnormality": {
"111286": ("No abnormality", [6030, 6031]),
},
"NoAlgorithmsSucceededWithoutFindings": {
"111245": ("No algorithms succeeded; without findings", [6047]),
},
"NoAttenuationCorrection": {
"122729": ("No Attenuation Correction", [3112]),
},
"NoComplications": {
"111492": ("No complications", [6062]),
},
"NoCornealCompensation": {
"111922": ("No corneal compensation", [4261]),
},
"NoCorrelationToClinicalFindings": {
"111387": ("No correlation to clinical findings", [6158]),
},
"NoCorrelationToOtherImagingFindings": {
"111386": ("No correlation to other imaging findings", [6158]),
},
"NoEmptyTileSuppression": {
"112721": ("No empty tile suppression", [8133]),
},
"NoFamilyHistoryOfProstateCancer": {
"130587": ("No family history of prostate cancer", [6322]),
},
"NoFilter": {
"111609": ("No filter", [4204, 8124, 10007]),
},
"NoGrid": {
"111646": ("No grid", [10017]),
},
"NoImage": {
"111213": ("No image", [6041, 6135, 7011]),
},
"NoKnownExposure": {
"111587": ("No known exposure", [6090]),
},
"NoPosteriorAcousticFeatures": {
"111367": ("No posterior acoustic features", [6155]),
},
"NoRealignment": {
"122477": ("No Realignment", [3458]),
},
"NoSubsequentWorkitems": {
"110009": ("No subsequent Workitems", [9231]),
},
"NodeAuthentication": {
"110126": ("Node Authentication", [401, 403]),
},
"NodeID": {
"110182": ("Node ID", [404]),
},
"NodularPattern": {
"112067": ("Nodular pattern", [6102, 6104, 6106]),
},
"Nomenclature": {
"127413": ("Nomenclature", []),
},
"NominalEmptyTileSuppression": {
"112719": ("Nominal empty tile suppression", [8133]),
},
"NominalRadiationSourceLocation": {
"130358": ("Nominal Radiation Source Location", [9544, 9554]),
},
"NominalSingleCollimationWidth": {
"113826": ("Nominal Single Collimation Width", []),
},
"NominalTotalCollimationWidth": {
"113827": ("Nominal Total Collimation Width", []),
},
"NonBloodyDischarge": {
"111478": ("Non-bloody discharge (from nipple)", [6055]),
},
"NonDiagnosticECG": {
"122753": ("Non-diagnostic ECG", [3677]),
},
"NonDiagnosticLowHeartRate": {
"122750": ("Non-diagnostic - low heart rate", [3231]),
},
"NonDiagnosticRestingSTAbnormalities": {
"122751": ("Non-diagnostic - resting ST abnormalities", [3231]),
},
"NonDiagnosticVentricularPacingOrLBBB": {
"122752": ("Non-diagnostic - ventricular pacing or LBBB", [3231]),
},
"NonFlatteningFilterBeam": {
"130356": ("Non-Flattening Filter Beam", [9549]),
},
"NonFocalAbnormality": {
"130592": ("Non-focal abnormality", []),
"Non-focal abnormality": ("Non-focal abnormality", [6335, 6336, 6337]),
},
"NonImagingDopplerUltrasoundTransducerGeometry": {
"125251": ("Non-imaging Doppler ultrasound transducer geometry", [12033]),
},
"NonIonicIodinatedContrastAgent": {
"127855": ("Non-ionic iodinated contrast agent", []),
},
"NonLesion": {
"111102": ("Non-lesion", [6014, 6016, 6054, 6101, 6201]),
},
"NonLesionAtBaseline": {
"112076": ("Non-Lesion at Baseline", [6145]),
},
"NonLesionModifier": {
"112037": ("Non-lesion Modifier", []),
},
"NonSpecificVolume": {
"130046": ("Non-specific Volume", [9501, 9502]),
},
"NonSynchronizedRoboticTreatment": {
"130140": ("Non-Synchronized Robotic Treatment", [9523, 9524]),
},
"NonTargetLesionAtBaseline": {
"112075": ("Non-Target Lesion at Baseline", [6145]),
},
"NonTargetLesionCompleteResponse": {
"112045": ("Non-Target Lesion Complete Response", [6143, 6144]),
},
"NonTargetLesionIncompleteResponseOrStableDisease": {
"112046": (
"Non-Target Lesion Incomplete Response or Stable Disease",
[6143, 6144],
),
},
"NonTargetLesionProgressiveDisease": {
"112047": ("Non-Target Lesion Progressive Disease", [6143, 6144]),
},
"NonUterineLeiomyosarcoma": {
"130406": ("Non-uterine leiomyosarcoma", [638, 639]),
},
"NormalAxillaryNode": {
"111251": ("Normal axillary node", [6030, 6031]),
},
"NormalBreastTissue": {
"111287": ("Normal breast tissue", [6030, 6031, 6054, 6057]),
},
"NormalImplants": {
"111503": ("Normal implants", [6072]),
},
"NormalIntervalFollowUp": {
"111140": ("Normal interval follow-up", [6028, 6029]),
},
"NormalMyocardium": {
"122112": ("Normal Myocardium", [3704]),
},
"NormalRangeAuthority": {
"121408": ("Normal Range Authority", []),
},
"NormalRangeDescription": {
"121407": ("Normal Range description", []),
},
"Normality": {
"121402": ("Normality", []),
},
"NormalizationFactor": {
"128522": ("Normalization Factor", [10069]),
},
"NormalizedChordLength": {
"122450": ("Normalized Chord Length", []),
},
"NormalizedValuesOfVentricularMeasurements": {
"122609": ("Normalized Values Of Ventricular Measurements", []),
},
"NorthAmericanPurebredDogRegistry": {
"109216": ("North American Purebred Dog Registry", [7481]),
},
"NoseCone": {
"127060": ("Nose cone", [617]),
},
"NotANumber": {
"114000": ("Not a number", [42, 43]),
},
"NotAllAlgorithmsSucceededWithFindings": {
"111244": ("Not all algorithms succeeded; with findings", [6047]),
},
"NotAllAlgorithmsSucceededWithoutFindings": {
"111243": ("Not all algorithms succeeded; without findings", [6047]),
},
"NotAttempted": {
"111225": ("Not Attempted", [6042]),
},
"NotForPresentationRenderingDeviceExpectedNotToPresent": {
"111152": (
"Not for Presentation: Rendering device expected not to present",
[6034],
),
},
"NotOptimizedForTheDeviceInstance": {
"128620": ("Not optimized for the device instance", [800]),
},
"NotParallel": {
"111356": ("Not parallel", [6152]),
},
"NotSure": {
"111399": ("Not sure", [6164]),
},
"NotVisualized": {
"122288": ("Not visualized", [3703]),
},
"NuclearMedicine": {
"NM": ("Nuclear Medicine", [29, 30, 33]),
},
"NuclearMedicineImagingSubjectMatter": {
"128735": ("Nuclear medicine imaging subject matter", [7017]),
},
"NuclearMedicineProjectionActivity": {
"110820": ("Nuclear Medicine Projection Activity", [218, 7180, 7469]),
},
"NuclearMedicineTomographicActivity": {
"110821": ("Nuclear Medicine Tomographic Activity", [218, 7180, 7469]),
},
"NumberOfAnimalsWithinSameHousingUnit": {
"127143": ("Number of animals within same housing unit", []),
},
"NumberOfCalcifications": {
"111038": ("Number of calcifications", []),
},
"NumberOfDiseasedVesselTerritories": {
"122762": ("Number of diseased vessel territories", []),
},
"NumberOfEctopicBeats": {
"122707": ("Number of Ectopic Beats", []),
},
"NumberOfFetuses": {
"121038": ("Number of Fetuses", []),
},
"NumberOfFirstDegreeRelativesAffectedByMalignantMelanoma": {
"130487": (
"Number of first-degree relatives affected by malignant melanoma",
[],
),
},
"NumberOfFocalPlanes": {
"112707": ("Number of focal planes", []),
},
"NumberOfFractionsCompleted": {
"121387": ("Number of Fractions Completed", []),
},
"NumberOfFractionsPlanned": {
"121386": ("Number of Fractions Planned", []),
},
"NumberOfFrames": {
"121140": ("Number of Frames", []),
},
"NumberOfHousingUnitsPerRack": {
"127141": ("Number of housing units per rack", []),
},
"NumberOfImagesUsedForMacularMeasurements": {
"111691": ("Number of Images Used for Macular Measurements", []),
},
"NumberOfInjectorHeads": {
"130219": ("Number of Injector Heads", []),
},
"NumberOfLesionInterventionsAttempted": {
"122175": ("Number of lesion interventions attempted", []),
},
"NumberOfLesionInterventionsSuccessful": {
"122176": ("Number of lesion interventions successful", []),
},
"NumberOfMalignantMelanomas": {
"130483": ("Number of malignant melanomas", []),
},
"NumberOfMelanomasInSitu": {
"130484": ("Number of melanomas in situ", []),
},
"NumberOfNeedlesAroundTarget": {
"111439": ("Number of needles around target", []),
},
"NumberOfNodesPositive": {
"111474": ("Number of nodes positive", []),
},
"NumberOfNodesRemoved": {
"111473": ("Number of nodes removed", []),
},
"NumberOfPasses": {
"111436": ("Number of passes", []),
},
"NumberOfPulses": {
"113768": ("Number of Pulses", []),
},
"NumberOfRacksPerRoom": {
"127140": ("Number of racks per room", []),
},
"NumberOfSamplesUsedPerImage": {
"111692": ("Number of Samples Used per Image", []),
},
"NumberOfSimilarFindings": {
"111406": ("Number of similar findings", []),
},
"NumberOfSpecimens": {
"111437": ("Number of specimens", []),
},
"NumberOfStimulusEvents": {
"130494": ("Number of Stimulus Events", []),
},
"NumberOfXRaySources": {
"113823": ("Number of X-Ray Sources", []),
},
"Nurse": {
"121082": ("Nurse", []),
},
"NursingNote": {
"121172": ("Nursing Note", [3401]),
},
"NursingUnitCancel": {
"110511": ("Nursing unit cancel", [9300, 9301]),
},
"OBGYNUltrasoundProcedureReport": {
"125000": ("OB-GYN Ultrasound Procedure Report", [12024]),
},
"OBGynImagingSpecialty": {
"128012": ("OB/Gyn Imaging Specialty", [7449]),
},
"OCTAAmplitudeDecorrelation": {
"128252": ("OCT-A amplitude decorrelation", [4270]),
},
"OCTAComplexVariance": {
"128253": ("OCT-A complex variance", [4270]),
},
"OCTACorrelationMapping": {
"128255": ("OCT-A correlation mapping", [4270]),
},
"OCTAOneSidedRatioGreater": {
"128305": ("OCT-A one-sided ratio (greater)", [4270]),
},
"OCTAOneSidedRatioLesser": {
"128304": ("OCT-A one-sided ratio (lesser)", [4270]),
},
"OCTASpeckleVariance": {
"128254": ("OCT-A speckle variance", [4270]),
},
"OCTBScanAnalysis": {
"128303": ("OCT B-scan analysis", [7203]),
},
"OIQPattern": {
"109901": ("OIQ Pattern", [8301]),
},
"OLED": {
"109994": ("OLED", [8303]),
},
"OLINDAEXM": {
"113527": ("OLINDA-EXM", [10040]),
},
"OSEMAlgorithm": {
"122720": ("OSEM algorithm", [3117]),
},
"OSLD": {
"128706": ("OSLD", [7026, 7027, 7151, 7193]),
},
| |
"""App for calculating the cumulative probability of *r* successes in *n* trials.
Determine the number of trials needed to reach a certain probability threshold
or find the probability at a specified number of trials.
[Streamlit](https://share.streamlit.io/hqn006/streamlit-probability/main/cumulative.py)
[GitHub](https://github.com/hqn006/streamlit-probability)
## [Definition of Cumulative Probability](https://en.wikipedia.org/wiki/Cumulative_distribution_function)
The cumulative probability of a random variable *X* evaluated at *x* is defined as
the probability that *X* will take a value less than or equal to *x*. The cumulative
distribution function is given by
$$ F_X(x) = P(X \\leq x) $$
The variables *x* and *r* are used interchangeably in this document.
## Calculating Cumulative Probability
This module examines the case where *X* is a binomial distribution with *r* being
the number of desired successes, *p* being the probability of success for each trial,
and *n* ranging from *r* to a maximum input value.
### Probability of "exactly *r* successes"
To calculate the probability of *x* successes in *n* trials, use the binomial
theorem.
$$ P(X = x) = \\binom{n}{x} p^x (1-p)^{n-x} $$
This module uses the [`math.comb`](https://docs.python.org/3/library/math.html)
function for "n choose k" type calculations.
### Probability of "at most *r* successes"
Sum all probabilities that *X* will take values within a range of "at most *r*
successes" with
$$ P(X \\leq x) = \\sum_{k=0}^{r} P(X = k) $$
It follows that the probability of "less than *r* successes" is
$$ P(X < x) = \\sum_{k=0}^{r-1} P(X = k) $$
### Probability of "at least *r* successes"
Obtain the probability of "greater than *r* successes" using the complement
of "at most *r* successes"
$$ P(X > x) = 1 - P(X \\leq x) $$
Then the probability of "at least *r* successes" is the complement of "less than
*r* successes"
$$ P(X \\geq x) = 1 - P(X < x) $$
"""
from math import comb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
def main():
    """Entry point: collect sidebar inputs, compute probabilities, render output."""
    st.set_page_config(layout='wide')

    # All user inputs live in the sidebar.
    with st.sidebar:
        prob_target, n_target, p, r, n_max = params()
        complement, include_edge, cond_label = range_cond()

    # Narrow column for tabular output, wide column for the plot.
    col_data, col_plot = st.columns([1, 3])

    # Compute the cumulative distribution and locate the requested points.
    probs = Cumulative(r, n_max)
    probs.calc(p, r, complement, include_edge)
    probs.find_desired(prob_target, n_target, complement)

    # Data table + download button.
    with col_data:
        frame = probs.show_data()
        download_df(frame)

    # Distribution plot with the located points annotated.
    with col_plot:
        probs.plot_graph(r, cond_label)

    return None
class Cumulative:
    """Cumulative binomial probability over a range of trial counts.

    Holds the scanned trial counts ``N``, the matching cumulative
    probabilities ``P``, and the two points of interest located by
    :meth:`find_desired`.
    """

    def __init__(self, r, n_max):
        """Set up the trial-count axis and result placeholders.

        Parameters
        ----------
        r : int
            Number of successes
        n_max : int
            Max number of trials
        """
        # Trial counts scanned, ascending (starts one below r).
        self.N = np.arange(r - 1, n_max)
        # Cumulative probability for each entry of N.
        self.P = np.zeros(self.N.shape)
        # Trial count where the desired probability threshold is first crossed.
        self.n_found = -1
        # Cumulative probability at n_found.
        self.P_closest = 0
        # Cumulative probability found at the requested number of trials.
        self.P_found = -1
        # The requested number of trials (mirrors n_des once found).
        self.n_closest = 0

    def calc(self, p, r, complementary, inclusive):
        """Fill ``P`` via the cached module-level helper.

        Parameters
        ----------
        p : float
            Probability of one successful event
        r : int
            Number of successes
        complementary : bool
            Specifies cumulative probability or its complement
        inclusive : bool
            Specifies whether edge case is inclusive
        """
        self.P = calc_prob(self.N, p, r, complementary, inclusive)
        return None

    def find_desired(self, P_des, n_des, complementary):
        """Locate the threshold crossing and the probability at ``n_des``.

        Parameters
        ----------
        P_des : float
            Desired cumulative probability when searching for a trial count
        n_des : int
            Number of trials at which to look up the cumulative probability
        complementary : bool
            Specifies cumulative probability or its complement
        """
        for idx, n in enumerate(self.N):
            # First trial count whose probability crosses the threshold.
            if self.n_found <= 0:
                crossed = (self.P[idx] > P_des if complementary
                           else self.P[idx] < P_des)
                if crossed:
                    self.n_found = n
                    self.P_closest = self.P[idx]
            # Probability at the requested trial count.
            if self.P_found <= 0 and n == n_des:
                self.P_found = self.P[idx]
                self.n_closest = n_des
            # Stop once both points have been located.
            if self.n_found > 0 and self.P_found > 0:
                break
        return None

    def show_data(self):
        """Report whether each requested point was found and show the data.

        Returns
        -------
        df : pandas.DataFrame
            Dataframe containing `N` and `P` arrays
        """
        # Threshold-crossing point.
        if self.n_found > 0:
            st.success("Desired Cumulative Probability **FOUND**")
        else:
            st.warning("Desired Cumulative Probability **NOT FOUND**")
        st.write("Cumulative Probability:", self.P_closest)
        st.write("Number of trials:", self.n_found)

        # Probability at the requested trial count.
        if self.P_found > 0:
            st.success("Probability at Number of Trials **FOUND**")
        else:
            st.warning("Probability at Number of Trials **NOT FOUND**")
        st.write("Number of trials:", self.n_closest)
        st.write("Cumulative Probability:", self.P_found)

        df = pd.DataFrame({'N': self.N, 'P': self.P})
        st.dataframe(df, None, 700)
        return df

    def plot_graph(self, r, out_txt):
        """Plot the cumulative probability distribution with Matplotlib.

        Parameters
        ----------
        r : int
            Number of successes
        out_txt : str
            Output text description of range conditions
        """
        fig, ax = plt.subplots()
        ax.set_title(f"Cumulative Probability of {out_txt} {r} Successes in n Trials")
        ax.set_xlabel("Number of Trials")
        ax.set_ylabel("Cumulative Probability")
        ax.plot(self.N, self.P)

        # Annotate the threshold crossing.
        ax.hlines(self.P_closest, 0, self.N[-1], 'r', 'dashed')
        ax.text(self.n_found, self.P_closest + 0.01,
                f'({self.n_found}, {self.P_closest:.3f})')

        # Annotate the probability at the requested trial count.
        ax.plot(self.n_closest, self.P_found, 'r+')
        ax.text(self.n_closest, self.P_found + 0.01,
                f'({self.n_closest}, {self.P_found:.3f})')

        ax.set_ylim(0, 1)
        st.pyplot(fig)
        return None
@st.experimental_memo
def calc_prob(N, p, r, complementary, inclusive):
    """Calculate the cumulative probability for each trial count in ``N``.

    Parameters
    ----------
    N : ndarray
        Array containing numbers of trials ascending
    p : float
        Probability of one successful event
    r : int
        Number of successes
    complementary : bool
        Specifies cumulative probability or its complement
    inclusive : bool
        Specifies whether edge case is inclusive

    Returns
    -------
    P : ndarray
        Array of cumulative probabilities corresponding to `N`
    """
    # The summation bound depends only on the range condition, not on n, so
    # compute it once outside the loop. The sum below is P(X <= r_include);
    # in the complementary case it is subtracted from 1 afterwards, which is
    # why the inclusive/exclusive bounds are swapped between the two cases.
    if complementary:  # At Least or Greater Than
        r_include = r - 1 if inclusive else r
    else:  # At Most or Less Than
        r_include = r if inclusive else r - 1

    P = np.zeros(N.shape)
    for i, n in enumerate(N):
        # Sum P(X = k) for k = r_include .. 0 (descending, preserving the
        # original floating-point summation order).
        sum_exactly = sum(comb(n, k) * p**k * (1 - p)**(n - k)
                          for k in range(r_include, -1, -1))
        P[i] = 1 - sum_exactly if complementary else sum_exactly
    return P
def params():
    """Sidebar widgets that collect the core calculation parameters.

    Returns
    -------
    P_des : float
        Desired cumulative probability if wanting to find number of trials needed
    n_des : int
        Desired number of trials if wanting to find cumulative probability at a point
    p : float
        Probability of one successful event
    r : int
        Number of successes
    n_max : int
        Max number of trials (one past the last trial count scanned)
    """
    st.header("Cumulative Probability Calculator")

    desired_prob = st.number_input(
        "Desired Cumulative Probability",
        0.0, 1.0, 0.5,
        step=0.1, format="%.3f"
    )
    query_trials = st.number_input(
        "Find Probability at Number of Trials",
        value=-1,
        step=1
    )

    "---"  # Streamlit "magic": rendered as a horizontal rule

    event_prob = st.number_input(
        "Probability of event",
        0.0, 1.0, 0.01,
        step=0.001, format="%.8f"
    )
    successes = st.number_input("Number of successes", 1)
    max_trials = st.number_input("Max number of trials", 1, value=500, step=100) + 1

    return desired_prob, query_trials, event_prob, successes, max_trials
def range_cond():
    """Sidebar widgets selecting the range condition and edge-case handling.

    Returns
    -------
    complementary : bool
        Specifies cumulative probability or its complement
    inclusive : bool
        Specifies whether edge case is inclusive
    out_txt : str
        Output text description of range conditions
    """
    "Range Conditions"  # Streamlit "magic": rendered as markdown text

    # Unchecked defaults correspond to "At Most".
    complementary = st.checkbox("Complementary", False)
    inclusive = st.checkbox("Inclusive", True)

    # Map the two flags to their human-readable description.
    descriptions = {
        (True, True): "At Least",
        (True, False): "Greater Than",
        (False, True): "At Most",
        (False, False): "Less Than",
    }
    out_txt = descriptions[(complementary, inclusive)]
    st.info("Description: " + out_txt + " *r* Successes")

    return complementary, inclusive, out_txt
def download_df(df):
"""Button to download DataFrame as CSV.
Parameters
----------
df : pandas.DataFrame
Dataframe containing `N` and `P` arrays
"""
@st.experimental_memo
def convert_df(df):
# IMPORTANT: Cache the conversion to prevent computation on every rerun
return df.to_csv().encode('utf-8')
csv = | |
"""
<NAME>
Midwater Assessment and Conservation Engineering
NOAA Alaska Fisheries Science Center
<EMAIL>
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from QIVPolygonItem import QIVPolygonItem
from QIVMarkerText import QIVMarkerText
class QIVPolygon(QGraphicsItemGroup):
"""
QIVPolygon implememts open and closed polygon items with simplified vertex
labeling. The labels are implemented by QIVMarkerText, are non-scaling,
and provide the ability to justify and offset labels from the vertex anchor.
If you only need a simple polygon object without labeling, you can use
QIVPolygonItem directly.
If a polygon is specified as "open" the last vertex is not connected
the first and the polygon cannot be filled. You can also think of open
polygons as polylines. "Closed" polygons do have their last vertext connected
to the first. Closed polygons can be filled by setting the fill keyword.
QIVPolygon Arguments:
vertices - The polygon vertices as:
A list of QPoint or QpointF objects defining the vertices
A list of [x,y] pairs (i.e. [[x,y],[x,y],[x,y],...]
A QRect or QRectF object
color - a 3 element list or tuple containing the RGB triplet
specifying the outline color of the polygon
thickness - A float specifying the outline thickness of the polygon.
alpha - A integer specifying the opacity of the polygon. 0 is transparent
and 255 is solid.
linestyle - '=' for solid, '-' for dashed, and '.' for dotted.
fill - a 3 element list or tuple containing the RGB triplet
specifying the fill color of the polygon. Set to None for
no fill.
"""
def __init__(self, vertices, color=[220,10,10], thickness=1.0,
        alpha=255, linestyle='=', fill=None, selectable=True,
        movable=False, selectThickness=4.0, selectColor=None,
        closed=True, view=None, parent=None, name='QIVPolygon'):
    '''
    Create a QIVPolygon. See the class docstring for argument details.

    NOTE(review): the list default for `color` is a mutable default
    argument shared across all calls; it looks read-only here, but any
    downstream mutation inside QIVPolygonItem would leak between
    instances - confirm.
    '''
    super(QIVPolygon, self).__init__(parent)

    self.name = name
    self.view = view
    self.polygon = None
    # QIVMarkerText labels attached to this polygon's vertices
    self.labels = []

    # create the polygon item - note that we make the item non-selectable and non-movable
    # since we want to select/move the "this" object (the QGraphicsItemGroup) and not the
    # items contained in it.
    self.polygon = QIVPolygonItem(vertices, color=color, thickness=thickness,
            alpha=alpha, linestyle=linestyle, fill=fill, selectable=False,
            selectThickness=selectThickness, selectColor=selectColor,
            movable=False, closed=closed, parent=self)

    # and add it to our item group
    self.addToGroup(self.polygon)

    # now set selectable/movable flags for the itemgroup
    self.setFlag(QGraphicsItem.ItemIsSelectable, selectable)
    self.setFlag(QGraphicsItem.ItemIsMovable, movable)
def getLabelsFromName(self, labelName):
'''
returns a list of QIVMarkerText references that share the name provided in the
labelName argument.
'''
labelReferences = []
# find label(s) given the label name
for label in self.labels:
if (label.name == labelName):
labelReferences.append(label)
return labelReferences
def removeLabel(self, labels):
'''
removeLabel removes a marker label given the label reference or labelName.
You can also pass a list of references or names. If the label name is provided,
all labels with that name will be removed.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given a single item - check if it is a name or ref
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
self.labels.remove(label)
self.removeFromGroup(label)
else:
# assume this is a label reference
try:
self.labels.remove(label)
self.removeFromGroup(label)
except:
# bad reference - not in our list of labels
pass
def removeAllLabels(self):
'''
removeAllLabels is a convenience method to clear all labels associated with this mark.
'''
self.removeLabel(self.labels)
def getLabels(self):
'''
getLabels returns the list of labels associated with this mark
'''
return self.labels
def addLabel(self, vertex, text, size=10, font='helvetica', italics=False, weight=-1,
color=[0,0,0], alpha=255, halign='left', valign='top', name='QIVPolygonLabel',
offset=None):
"""
Add a label to the polygon at a specified vertex. Labels are children of the polygon.
vertex (int) - The 0 based vertex number to attach the label to.
text (string) - The text to add to the dimension line.
offset (QPointF) - An offset from your position. The units are pixels at the
image's native resolution. This gets muddled when used with
classes that transform coordinates, especially QMapViewer.
size (int) - The text size, in point size
font (string) - A string containing the font family to use. Either stick
to the basics with this (i.e. "times", "helvetica") or
consult the QFont docs.
italics (bool) - Set to true to italicise the font.
weight (int) - Set to an integer in the range 0-99. 50 is normal, 75 is bold.
color (list) - A 3 element list or tuple containing the RGB triplet
specifying the color of the text.
alpha (int) - An integer specifying the opacity of the text. 0 is transparent
and 255 is solid.
halign (string) - Set this value to set the horizontal anchor point. Values are:
'left' - Sets the anchor to the left side of the text
'center' - Sets the anchor to the middle of the text
'right' - Sets the anchor to the right side of the text
valign (string) - Set this value to set the vertical anchor point. Values are:
'top' - Sets the anchor to the top of the text
'center' - Sets the anchor to the middle of the text
'bottom' - Sets the anchor to the bottom of the text
name (string) - Set this to the name associated with the text object. The name
can be used to differentiate between your text objects.
"""
if (offset == None) or (offset == []):
offset = QPointF(0,0)
# get the position given the vertex index
position = self.polygon[vertex]
# create a QIVMarkerText associated with the provided mark/line
textItem = QIVMarkerText(position, text, offset=offset, size=size, font=font, italics=italics,
weight=weight, color=color, alpha=alpha, halign=halign,
valign=valign, name=name, view=self.view)
# add the label to our list of labels
self.labels.append(textItem)
self.addToGroup(textItem)
def setLabelText(self, labels, text):
'''
Sets the label text given the label reference or name and text.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
ref.setText(text)
else:
# assume this is a label reference
try:
label.setText(text)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(labels)
for ref in labelRefs:
ref.setText(text)
else:
# assume this is a label reference
try:
labels.setText(text)
except:
# bad reference - not in our list of labels
pass
def setLabelVisible(self, labels, show):
'''
Sets the label visibility given the label reference or name and the
visibility state.
'''
if (labels.__class__.__name__.lower() == 'list'):
# we've been given a list of label references or names
for label in labels:
if (label.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(label)
for ref in labelRefs:
ref.setVisible(show)
else:
# assume this is a label reference
try:
label.setVisible(show)
except:
# bad reference - not in our list of labels
pass
else:
# we've been given
if (labels.__class__.__name__.lower() == 'str'):
# assume this is a label name
labelRefs = self.getLabelsFromName(labels)
for ref in labelRefs:
ref.setVisible(show)
else:
# assume this is a label reference
try:
labels.setVisible(show)
except:
# bad reference - not in our list of labels
pass
def showLabels(self, labels=None):
"""
showLabels makes the provided label or labels visible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are visible.
"""
if (labels == None):
labels = self.labels
self.setLabelVisible(labels, True)
def hideLabels(self, labels=None):
"""
hideLabels makes the provided label or labels invisible. Labels can be
a list of label references, a list of label names, or a single reference
or name. If labels is None, all labels for this mark are hidden.
"""
if (labels | |
import copy
import contextlib
from functools import partialmethod
from contextlib import contextmanager,redirect_stderr,redirect_stdout
from os import devnull
import tqdm
from tqdm.auto import tqdm as tqdm_
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y
from mne.decoding import TimeDelayingRidge, ReceptiveField
from mne.decoding.time_delaying_ridge import _fit_corrs, _compute_reg_neighbors, _edge_correct
from mne.decoding.receptive_field import _reshape_for_est, _delays_to_slice, _times_to_delays, _delay_time_series, _corr_score, _r2_score
from ..out_struct import OutStruct
from ..utils import _parse_outstruct_args
from mne.cuda import _setup_cuda_fft_multiply_repeated
from mne.filter import next_fast_len
from mne.fixes import jit
from mne.parallel import check_n_jobs
from mne.utils import warn, logger
def _compute_corrs(X, y, smin, smax, n_jobs=1, fit_intercept=False,
                   edge_correction=True):
    """Compute auto- and cross-correlations for TRF fitting.

    This function is copied from mne.decoding.time_delaying_ridge, with the
    progress bar removed.

    Parameters
    ----------
    X : ndarray, shape (n_times, n_ch_x) or (n_times, n_epochs, n_ch_x)
        Predictor time series.
    y : ndarray, shape (n_times, n_ch_y) or (n_times, n_epochs, n_ch_y)
        Target time series; time/epoch dims must match X.
    smin, smax : int
        Lag window in samples; the filter has ``smax - smin`` taps.
    n_jobs : int | 'cuda'
        Passed to the CUDA FFT setup ('cuda' enables GPU FFTs).
    fit_intercept : bool
        If True, remove the sample means of X and y before correlating.
    edge_correction : bool
        If True, correct autocorrelation terms for coefficients that are
        cut off at the edges of the time series.

    Returns
    -------
    x_xt : ndarray, shape (n_ch_x*len_trf, n_ch_x*len_trf)
        Summed lagged autocorrelation (Gram) matrix of X.
    x_y : ndarray, shape (n_ch_x*len_trf, n_ch_y)
        Summed lagged cross-correlations between X and y.
    n_ch_x : int
        Number of predictor channels.
    X_offset, y_offset : ndarray | float
        Means removed from X and y (0. when ``fit_intercept`` is False).
    """
    if fit_intercept:
        # We could do this in the Fourier domain, too, but it should
        # be a bit cleaner numerically to do it here.
        X_offset = np.mean(X, axis=0)
        y_offset = np.mean(y, axis=0)
        if X.ndim == 3:
            X_offset = X_offset.mean(axis=0)
            y_offset = np.mean(y_offset, axis=0)
        X = X - X_offset
        y = y - y_offset
    else:
        X_offset = y_offset = 0.
    if X.ndim == 2:
        # Promote (n_times, n_ch) input to (n_times, 1 epoch, n_ch).
        assert y.ndim == 2
        X = X[:, np.newaxis, :]
        y = y[:, np.newaxis, :]
    assert X.shape[:2] == y.shape[:2]
    len_trf = smax - smin
    len_x, n_epochs, n_ch_x = X.shape
    len_y, n_epcohs, n_ch_y = y.shape  # NOTE(review): 'n_epcohs' typo kept from upstream; value unused
    assert len_x == len_y
    n_fft = next_fast_len(2 * X.shape[0] - 1)
    n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(
        n_jobs, [1.], n_fft, 'correlation calculations')

    # create our Toeplitz indexer
    ij = np.empty((len_trf, len_trf), int)
    for ii in range(len_trf):
        ij[ii, ii:] = np.arange(len_trf - ii)
        x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1)
        ij[ii + 1:, ii] = x

    x_xt = np.zeros([n_ch_x * len_trf] * 2)
    x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
    n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x)  # NOTE(review): unused after progress-bar removal
    logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x))
    count = 0
    for ei in range(n_epochs):
        this_X = X[:, ei, :]
        # XXX maybe this is what we should parallelize over CPUs at some point
        # Correlations are computed in the frequency domain: one FFT per
        # epoch, then channel-pair products.
        X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0)
        X_fft_conj = X_fft.conj()
        y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0)

        for ch0 in range(n_ch_x):
            for oi, ch1 in enumerate(range(ch0, n_ch_x)):
                this_result = cuda_dict['irfft'](
                    X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0)
                # Our autocorrelation structure is a Toeplitz matrix, but
                # it's faster to create the Toeplitz ourselves than use
                # linalg.toeplitz.
                this_result = this_result[ij]
                # However, we need to adjust for coeffs that are cut off,
                # i.e. the non-zero delays should not have the same AC value
                # as the zero-delay ones (because they actually have fewer
                # coefficients).
                #
                # These adjustments also follow a Toeplitz structure, so we
                # construct a matrix of what has been left off, compute their
                # inner products, and remove them.
                if edge_correction:
                    _edge_correct(this_result, this_X, smax, smin, ch0, ch1)

                # Store the results in our output matrix; mirror into the
                # symmetric (ch1, ch0) block for off-diagonal pairs.
                x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
                     ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
                if ch0 != ch1:
                    x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
                         ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
                count += 1

            # compute the crosscorrelations
            cc_temp = cuda_dict['irfft'](
                y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0)
            if smin < 0 and smax >= 0:
                # Lag window straddles zero: wrap negative lags around the
                # circular correlation.
                x_y[:-smin, ch0] += cc_temp[smin:]
                x_y[len_trf - smax:, ch0] += cc_temp[:smax]
            else:
                x_y[:, ch0] += cc_temp[smin:smax]
            count += 1

    x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
    return x_xt, x_y, n_ch_x, X_offset, y_offset
@contextmanager
def suppress_stdout_stderr():
    """Temporarily silence stdout and stderr.

    Both streams are redirected to os.devnull for the duration of the
    ``with`` block; this hides tqdm progress bars emitted to sys.stderr
    during model fitting. Yields the (stderr, stdout) redirect targets.
    """
    with open(devnull, 'w') as sink:
        with redirect_stderr(sink) as err, redirect_stdout(sink) as out:
            yield (err, out)
class TRF(BaseEstimator):
'''
Allows the fitting of temporal receptive field (TRF) models
to one or more targets at a time using cross-validation. These can be
encoding models (stimulus-to-brain) or decoding (brain-to-stimulus) models.
Internally, this fits several mne.decoding.ReceptiveField models, one for each
target variable.
Please see the :ref:`TRF example notebooks <STRF examples>` for more detailed
tutorials which show how to train, test, and inspect TRF and STRF models.
Parameters
----------
tmin : float
The starting lag (inclusive), in seconds (or samples if ``sfreq`` == 1).
tmax : float
The ending lag (noninclusive), in seconds (or samples if ``sfreq`` == 1).
Must be > tmin.
sfreq : float
The sampling frequency used to convert times into samples.
reg_type : str, default='ridge'
Regularization type. Can be "ridge" (default) or "laplacian".
alpha : float | list or array-like of floats, default=np.logspace(2, 9, 8).
Regularization strength. If a list or array-like of values, then the best
one will be fit with cross-validation. If a list is given, alpha is
optimized for each target variable individually.
xval_test_portion : float, default=0.25
        If multiple alpha values are given, cross-validation is performed to
choose the best, using this portion as withheld test data on each
cross-validation loop.
fit_intercept : bool | None, default=False
If True, the sample mean is removed before fitting.
scoring : str, default='corrcoef'
Defines how predictions will be scored. Currently must be one of
'r2' (coefficient of determination) or 'corrcoef' (the correlation
coefficient).
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if CuPy
is installed properly and ``estimator is None``.
verbose : int, default=1
Level of printing output desired.
0 prints nothing, 1 prints only a single tqdm progress bar for
the fitting over the n-outputs in .fit(), and 2 prints information about
cross-validation, such as the alpha value chosen and the corresponding scores,
during the fitting procedure for each output.
Notes
-----
For a causal system, the encoding model would have significant
non-zero values only at positive lags. In other words, lags point
backwards in time relative to the input, so positive lags correspond
to previous time samples, while negative lags correspond to future
time samples. In most cases, an encoding model should use tmin=0
and tmax>0, while a decoding model should use tmin<0 and tmax=0.
'''
def __init__(self,
tmin,
tmax,
sfreq,
reg_type='ridge',
alpha=None,
xval_test_portion=0.25,
fit_intercept=False,
scoring='corrcoef',
n_jobs=1,
verbose=1):
if tmin >= tmax:
raise ValueError(f'tmin must be less than tmax, but got {tmin} and {tmax}')
if alpha is None:
alpha = [round(x, 2) for x in np.logspace(2, 9, 8)]
self.sfreq = float(sfreq)
self.tmin = tmin
self.tmax = tmax
self.reg_type = reg_type
self.alpha = np.array([alpha]) if isinstance(alpha, float) or isinstance(alpha, int) else alpha
self.xval_test_portion = xval_test_portion
self.fit_intercept = fit_intercept
self.scoring = scoring
self.n_jobs = n_jobs
self.verbose = verbose
if len(self.alpha) > 1 and self.xval_test_portion > 0.5:
raise ValueError(f'xval_test_portion must be no more than 0.5 if multiple alphas were given,'+
f' so that cross validation can occur correctly, but got {xval_test_portion}')
@property
def _smin(self):
return int(round(self.tmin * self.sfreq))
@property
def _smax(self):
return int(round(self.tmax * self.sfreq)) + 1
def _delay_and_reshape(self, X, y=None):
"""Delay and reshape the variables.
X and y should be arrays.
"""
if not isinstance(self.estimator_, TimeDelayingRidge):
# X is now shape (n_times, n_epochs, n_feats, n_delays)
X = _delay_time_series(X, self.tmin, self.tmax, self.sfreq,
fill_mean=self.fit_intercept)
X = _reshape_for_est(X)
# Concat times + epochs
if y is not None:
y = y.reshape(-1, y.shape[-1], order='F')
return X, y
def fit(self, outstruct=None, X='aud', y='resp'):
'''
Fit a multi-output model to the data in X and y, which contain multiple trials.
Parameters
----------
outstruct : naplib.OutStruct object, optional
OutStruct containing data to be normalized in one of the field.
If not given, must give the X and y data directly as the ``X``
and ``y`` arguments.
X : str | list of np.ndarrays or a multidimensional np.ndarray
Data to be used as predictor in the regression. Once arranged,
should be of shape (time, num_features).
If a string, it must specify one of the fields of the outstruct
provided in the first argument. If a multidimensional array, first dimension
indicates the trial/instances which will be concatenated over to compute
normalization statistics.
y : str | list of | |
#Calc threshold
thr=Num.sqrt(2)*scipy.special.erfinv((1.0-nabove/len(tstmp)))
#Calulate statistics and apply threshold
ts_mean,ts_sig,ts_med=Num.mean(tstmp),Num.std(tstmp),Num.median(tstmp)
ot=Num.where(Num.abs(tstmp-ts_med) < thr*ts_sig)[0]
ot_len=len(ot)
ot_diff=len(tstmp)-ot_len
if Num.round(ts_sig, decimals=1)==0.0: rmszero=1
else: rmszero=0
if debug==True: print "Initial stats: ", ts_mean, ts_med, ts_sig, thr, ot_len
if debug==True: print Num.where((tstmp-ts_med) > thr*ts_sig)[0]
#Loop until sufficiently clean
while ot_diff >= nabove and nloops<6 and ts_sig>0.0:
if rmszero==1: break
ts_mean,ts_sig,ts_med=Num.mean(tstmp[ot]),Num.std(tstmp[ot]),Num.median(tstmp[ot])
try:
thr=Num.sqrt(2)*scipy.special.erfinv((1.0-nabove/len(tstmp[ot])))
except ZeroDivisionError:
print "ZeroDivisionError in scipy.special.erfinv()"
thr=Num.sqrt(2)*scipy.special.erfinv((1.0-nabove/len(tstmp)))
ot=Num.where(Num.abs(tstmp-ts_med) < thr*ts_sig)[0]
ot_diff=ot_len-len(ot)
ot_len=len(ot)
if debug==True: print "Loop number ", nloops, ts_mean, ts_med, ts_sig, thr, ot_len
nloops+=1
if Num.round(ts_sig, decimals=1)==0.0: rmszero=1
else: rmszero=0
if rmszero != 1:
try:
thr=Num.sqrt(2)*scipy.special.erfinv((1.0-nabove/len(tstmp[ot])))
except ZeroDivisionError:
print "ZeroDivisionError in scipy.special.erfinv()"
thr=Num.sqrt(2)*scipy.special.erfinv((1.0-nabove/len(tstmp)))
ot_good=Num.copy(ot)
ot=Num.where(tstmp-ts_med > thr*ts_sig)[0]
#Define where clusters end
cl_end=Num.where(ot[1:]-ot[:-1] > 8)[0]
cl_end=Num.append(cl_end, len(ot)-1)
ot_bad=Num.array([])
p=0
for i in range(len(cl_end)):
clust=ts[ot[p:cl_end[i]+1]]
clen=len(clust)
if debug==True: print p, cl_end[i]+1, ot[p:cl_end[i]+1]
#If a cluster is sufficently broad, define a region
#25% larger than the cluster and mask it as bad
if clen > 1:
off=max(1,clen/4)
slo=max(ot[p]-off, 0)
shi=min(ot[cl_end[i]]+off, len(ts))
ot_bad=Num.append(ot_bad, Num.arange(slo,shi))
if debug==True: print "ot_bad:", ot_bad
p=cl_end[i]+1
ot_good=Num.delete(ot_good, ot_bad)
ts_mean,ts_sig=Num.mean(ts[ot_good]),Num.std(ts[ot_good])
return ts_mean,ts_sig
def clean_timeseries_old(ts, thr, clust_len=4, debug=False):
    '''Attempts to clean a time series to get reliable
    calculation of mean and std.
    It applies a threshold and looks for clusters of outliers greater than
    length clust_len and takes out a region surrounding each one.
    NOTE: legacy Python 2 routine (print statements, integer division).
    Inputs:
        ts = time series
        thr = SNR multiplier for threshold
        clust_len = the minimum length assumed for a cluster
        debug = will additionally print masked time series info
    Outputs:
        tmean = cleaned mean of time series
        tsig = cleaned standard deviation of time series
    '''
    nloops=0
    #Copy time series array and make it a masked array
    ts_mask=ma.copy(ts)
    #Calulate statistics and apply threshold
    ts_mean,ts_sig=Num.mean(ts_mask),Num.std(ts_mask)
    # ot: indices of samples more than thr*sigma above the mean
    ot=Num.where((ts_mask-ts_mean) > thr*ts_sig)[0]
    #Define where clusters end (gaps > 2 samples split clusters)
    cl_end=Num.where(ot[1:]-ot[:-1] > 2)[0]
    cl_end=Num.append(cl_end, len(ot)-1)
    if debug==True:
        print "First std: %f Num bad: %d" % (ts_sig, len(ot))
        print ot
    #Loop until sufficiently clean (at most 6 passes)
    while nloops<6:
        # Rounded-zero sigma means essentially constant data; stop.
        if Num.round(ts_sig, decimals=1)==0.0: break
        #Loop over clusters; p indexes the start of the current cluster in ot
        p=0
        for i in range(len(cl_end)):
            clust=ts[ot[p:cl_end[i]+1]]
            clen=len(clust)
            if debug==True: print p, cl_end[i]+1, ot[p:cl_end[i]+1]
            #If a cluster is sufficently broad, define a region
            #25% larger than the cluster and mask it as bad
            if clen > clust_len:
                off=clen/4
                slo=max(ot[p]-off, 0)
                shi=min(ot[cl_end[i]]+off, len(ts))
                ts_mask[slo:shi]=ma.masked
            #Otherwise just mask the high values
            else:
                # NOTE(review): this indexes ts_mask with positions *into ot*
                # (p:cl_end[i]+1) rather than ts_mask[ot[p:cl_end[i]+1]] -
                # looks like a bug in this legacy (_old) routine; confirm
                # before reuse.
                ts_mask[p:cl_end[i]+1]=ma.masked
            p=cl_end[i]+1
        #Recalculate statistics
        if debug==True: print "New: %f\n" % Num.std(ts_mask.data)
        if debug==True: print ts_mask.mask
        ts_mean_new,ts_sig_new=Num.mean(ts_mask),Num.std(ts_mask)
        #See if the stats are clean enough (< 5% change in sigma)
        if ts_sig/ts_sig_new - 1.0 < 0.05:
            if debug==True: print "Clean"
            break
        else:
            if debug==True: print ts_sig,ts_sig_new
            ts_sig,ts_mean=ts_sig_new,ts_mean_new
            # Re-threshold against the updated statistics and re-cluster.
            ot=Num.where((ts_mask.data-ts_mean) > thr*ts_sig)[0]
            cl_end=Num.where(ot[1:]-ot[:-1] > 2)[0]
            cl_end=Num.append(cl_end, len(ot)-1)
        nloops+=1
    return ts_mean,ts_sig
def clean_stddev(ts, thr, verbose=False):
    # Finds a "clean" std deviation by masking
    # outliers until the change in std dev
    # is less than about 1%
    # NOTE: legacy Python 2 routine (print statement below).
    #ts = time series
    #thr = multiplier of std dev for thresholding
    #verbose = report the loop count for each iteration
    #Returns: the last stable standard deviation (mean is computed but
    #         not returned - see the commented-out return statements).
    nloops=0
    #Make local copy of ts to clean
    tstmp=ma.copy(ts)
    #Calculate statistics of uncleaned data
    sigold=Num.std(tstmp)
    tsmean=Num.mean(tstmp)
    # Rounded-zero sigma means essentially constant data; nothing to clean.
    if Num.round(sigold, decimals=1) == 0.0: return sigold
    #return tsmean, sigold
    #Do first clean stage: mask samples more than thr*sigma above the mean
    inds=ma.where(tstmp - tsmean > thr*sigold)[0]
#    inds=ma.where(tstmp > thr*sigold)[0]
    #If there's nothing to clean, just leave (for efficiency)
    if len(inds) == 0: return sigold
    #return tsmean, sigold
    tstmp[inds]=ma.masked
    sig=Num.std(tstmp)
    tsmean=Num.mean(tstmp)
#    inds=Num.where(tstmp-tsmean < -1*(thr)*sig)[0]
#    tstmp[inds]=ma.masked
#    sig=Num.std(tstmp)
#    tsmean=Num.mean(tstmp)
    if Num.round(sig, decimals=1) == 0.0: return sig
    #return Num.tsmean,sig
#    if verbose == True: print mold, sigold
#    if verbose == True: print len(inds)
#    if verbose == True: print m, sig, sigold/sig-1.0
    #Loop through cleaning stages until ratio of
    # current stddev is less than 1 % the previous (at most 6 passes)
    while nloops<6:
        if sigold/sig - 1.0 < 0.01: break
        inds=ma.where(tstmp - tsmean > thr*sig)[0]
#        inds=ma.where(tstmp > thr*sig)[0]
        tstmp[inds]=ma.masked
        sigold=sig
        sig=Num.std(tstmp)
        tsmean=Num.mean(tstmp)
        nloops+=1
        if verbose==True: print nloops
#    return tsmean,sigold
    return sigold
def downsample_factor(dm, max=False):
    '''Return the downsample factor for a given
    DM for a fixed PRESTO DDplan.
    Currently assumes PALFA Mock data.
    Input:
        dm  = dispersion measure
        max = If True, return the max downsample factor regardless of input dm
              (default False)
    Output:
        ds  = downsample factor

    Note: DMs at or beyond the last boundary (>= 2038.4) now return the
    largest factor instead of raising ValueError as the old code did.
    (The parameter name ``max`` shadows the builtin but is kept for
    backward compatibility with existing callers.)
    '''
    import bisect
    # Upper DM edge of each step of the DDplan and the matching factor.
    dm_boundaries = [212.8, 443.2, 534.4, 876.4, 990.4, 1750.4, 2038.4]
    downsample = [1, 2, 3, 5, 6, 10, 15]
    if max:
        return downsample[-1]
    # bisect_right gives the first boundary strictly greater than dm,
    # matching the original Num.where(dm < dm_boundaries) + min().
    idx = bisect.bisect_right(dm_boundaries, dm)
    if idx >= len(downsample):
        # dm beyond the plan: clamp to the largest factor.
        return downsample[-1]
    return downsample[idx]
def hist_sigma(data, blo=0, bhi=120, bstep=1):
    # Histogram `data` between blo and bhi and return the width (in data
    # units, bstep per bin) of a window grown outward from the modal bin
    # until it contains at least a third of the samples - a crude estimate
    # of the distribution's core width.
    bins=range(blo, bhi+1, bstep)
    hist,hb=Num.histogram(data, bins=bins, range=(min(bins),max(bins)))
    total=Num.sum(hist)
    count=Num.max(hist)          # start from the peak bin's count
    arg=hist.argmax()            # index of the modal bin
    nbin=1                       # bins included so far
    offset=1                     # distance from the peak
    left=0                       # 0 -> take from the right, 1 -> from the left
    # NOTE(review): `left` is set to 1 on the first pass and never reset, so
    # after one bin to the right the window only ever grows leftward -
    # possibly meant to alternate sides; confirm before relying on symmetry.
    while count<total/3:
        if left==0: count+=hist[arg+offset]
        else: count+=hist[arg-offset]
        nbin+=1
        if left==0: left=1
        else: offset+=1
    return bstep*nbin
def flag_last_chunk(bad_blocks, detrendlen, chunklen):
    # Extend the most recent contiguous run of bad detrend blocks backwards
    # so it starts on a chunk boundary: find the first block of the last
    # contiguous bad run, compute how far into its chunk it starts, and
    # prepend that many earlier block indices to bad_blocks.
    # NOTE: legacy Python 2 code (print statement, integer division n2add).
    inds=Num.where(bad_blocks[1:]-bad_blocks[:-1] != 1)[0]
    if len(inds)==0:
        # All bad blocks are contiguous; the run starts at the first one.
        firstbad=bad_blocks[0]
        loc=0
    else:
        # Start of the last contiguous run of bad blocks.
        firstbad=bad_blocks[inds[-1]+1]
        loc=inds[-1]
    # Offset (in samples) of the first bad block within its chunk.
    ol=(firstbad*detrendlen) % chunklen
    # Number of whole detrend blocks between the chunk start and the run.
    n2add=ol/detrendlen
    new=Num.arange(-1*n2add, 0, 1)+firstbad
    bad_blocks=Num.insert(bad_blocks, loc*Num.ones(n2add)+1, new)
    print "New bad block: ", new
    return bad_blocks
def main():
parser = OptionParser(usage)
parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
default=False, help="Don't make a postscript plot, just use an X-window")
parser.add_option("-p", "--noplot", action="store_false", dest="makeplot",
default=True, help="Look for pulses but do not generate a plot")
parser.add_option("-m", "--maxwidth", type="float", dest="maxwidth", default=0.0,
help="Set the max downsampling in sec (see below for default)")
parser.add_option("-t", "--threshold", type="float", dest="threshold", default=5.0,
help="Set a different threshold SNR (default=5.0)")
parser.add_option("-s", "--start", type="float", dest="T_start", default=0.0,
help="Only plot events occuring after this time (s)")
parser.add_option("-e", "--end", type="float", dest="T_end", default=1e9,
help="Only plot events occuring before this time (s)")
parser.add_option("-g", "--glob", type="string", dest="globexp", default=None,
help="Process the files from this glob expression")
parser.add_option("-f", "--fast", action="store_true", dest="fast",
default=False, help="Use a faster method of de-trending (2x speedup)")
parser.add_option("-i", "--iter", action="store_true", dest="iter",
default=False, help="Use iterative cleaning for stats calc")
parser.add_option("-c", "--clust", action="store_true", dest="doclust",
default=False, help="Also apply cluster algorithm")
parser.add_option("-w", "--clust_maxgap", type="int", dest="maxgap", default=1,
help="Set the maximum gap (in bins) for clustering")
parser.add_option("-r", "--noflag", action="store_true", dest="noflag",
default=False, help="Do not do any RFI flagging")
(opts, args) = parser.parse_args()
if len(args)==0:
if opts.globexp==None:
print full_usage
sys.exit(0)
else:
args = []
for globexp in opts.globexp.split():
args += glob.glob(globexp)
useffts = True
dosearch = True
if opts.xwin:
pgplot_device = "/XWIN"
else:
pgplot_device = ""
# fftlen = 8192 # Should be a power-of-two for best speed
# chunklen = 8000 # Must be at least max_downfact less than fftlen
# detrendlen = 1000 # length of a linear piecewise chunk of data for detrending
fftlen = 65536 # Should be a power-of-two for best speed
chunklen = 64000 # Must be at least max_downfact less than fftlen
detrendlen = 4000 # length of a linear piecewise chunk of data for detrending
blocks_per_chunk = chunklen / detrendlen
overlap = (fftlen - chunklen)/2
worklen = chunklen + 2*overlap # currently it is fftlen...
max_downfact = 30
#LGS: Expanded to include 300, 1000 and 1500
default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 300, 500, 1000, 1500]
# default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150]
if args[0].endswith(".singlepulse"):
filenmbase = args[0][:args[0].rfind(".singlepulse")]
dosearch = False
elif args[0].endswith(".cluster"):
filenmbase = args[0][:args[0].rfind(".cluster")]
dosearch = False
elif args[0].endswith(".dat"):
filenmbase = args[0][:args[0].rfind(".dat")]
else:
filenmbase = args[0]
# Don't do a search, just read results and plot
if not dosearch:
info, DMs, candlist, num_v_DMstr = \
read_singlepulse_files(args, opts.threshold, opts.T_start, opts.T_end)
orig_N, orig_dt = int(info.N), info.dt
obstime = orig_N * orig_dt
else:
DMs = []
candlist = []
num_v_DMstr = {}
# Loop over the input files
for filenm in args:
if filenm.endswith(".dat"):
filenmbase = filenm[:filenm.rfind(".dat")]
else:
filenmbase = filenm
info = infodata.infodata(filenmbase+".inf")
DMstr = "%.2f"%info.DM
DMs.append(info.DM)
N, dt = int(info.N), info.dt
obstime = N * dt
dsfact=downsample_factor(info.DM)
# Choose the maximum width to search based on time instead
# of bins. This helps prevent increased S/N when the downsampling
# changes as the DM gets larger.
if opts.maxwidth > 0.0:
downfacts = [x for x in default_downfacts if x*dt <= opts.maxwidth]
else:
downfacts = [x for x in default_downfacts if x <= max_downfact]
if len(downfacts) == 0:
downfacts = [default_downfacts[0]]
if (filenm == args[0]):
orig_N = N
orig_dt = dt
if useffts:
fftd_kerns = make_fftd_kerns(downfacts, fftlen)
if info.breaks:
offregions = zip([x[1] for x in info.onoff[:-1]],
[x[0] for x in info.onoff[1:]])
outfile = open(filenmbase+'.singlepulse', mode='w')
if opts.doclust: outclust = open(filenmbase+'.cluster', mode='w')
# Compute the file | |
in cutsites:
cand_pam = local_seq[local_cutsite + 3 : local_cutsite + 3 + len(pam)]
if lib.match(pam, cand_pam):
num_grnas += 1
return 'PREDICT REPAIR FOR %s gRNAs' % (num_grnas)
@app.callback(
  Output('B_submit_button', 'style'),
  [Input('B_estimated_runtime', 'children')],
  [State('B_submit_button', 'style')])
def update_submit_button_style(est_runtime_text, style):
    """Grey out the submit button whenever the runtime estimate reports an error."""
    # Error in the estimate -> grey; otherwise the normal blue.
    has_error = 'Error' in est_runtime_text
    style['backgroundColor'] = '#86898C' if has_error else '#00A0DC'
    style['color'] = 'white'
    return style
##
# Prediction callback
##
@cache.memoize(timeout = cache_timeout)
def indelphi_predict_batch_cache(parameters):
    """Run inDelphi on every gRNA whose PAM matches in the query sequence.

    ``parameters`` is the tuple
    (seq, pam, celltype, adv_matchseq, adv_poi, adv_delstart, adv_delend).
    Returns a pandas DataFrame with one row per gRNA found on either strand,
    containing inDelphi summary statistics plus metadata columns and any
    requested advanced-option columns. Memoized so downstream callbacks can
    re-fetch results by parameter key without recomputing.
    """
    seq, pam, celltype, adv_matchseq, adv_poi, adv_delstart, adv_delend = parameters
    # When submit button clicked, find all gRNAs matching PAM in sequence.
    # Advanced options:
    #   if matchseq is provided, include a column on
    #     sum frequencies of repair gts matching sequence
    #     e.g., pathogenic -> wildtype repair
    #   if deletion range is provided, include a column on
    #     sum frequencies of repair gts deleting specified positions.
    #   if position of interest is provided, include a column on
    #     cutsite distance to position of interest
    dd = defaultdict(list)
    all_stats = pd.DataFrame()

    # Basic PAM sanity checks: not all-N, and a plausible length.
    assert pam.count('N') != len(pam)
    assert 2 <= len(pam) <= 6

    seq = seq.upper()
    pam = pam.upper()

    # Check and initialize advanced settings
    adv_matchseq_flag = False
    if adv_matchseq is not None and len(adv_matchseq) != 0:
        adv_matchseq = adv_matchseq.upper()
        adv_matchseq_flag = True
    adv_poi_flag = False
    if adv_poi is not None and len(adv_poi) > 0:
        # adv_poi is 1-indexed, switch to 0-index
        adv_poi = int(adv_poi) - 1
        adv_poi_flag = True
    adv_del_flag = False
    if adv_delstart is not None and adv_delend is not None:
        if len(adv_delstart) > 0 and len(adv_delend) > 0:
            adv_delstart, adv_delend = int(adv_delstart), int(adv_delend)
            if adv_delstart < adv_delend:
                # Convert the 1-indexed inclusive range to 0-indexed.
                adv_delstart -= 1
                adv_delend -= 1
                adv_del_flag = True

    # First pass: count gRNAs on both strands to bound the workload.
    num_grnas = 0
    seqs = [seq, lib.revcomp(seq)]
    cutsites = range(30, len(seq) - 30)
    for local_seq, grna_orient in zip(seqs, ['+', '-']):
        for local_cutsite in cutsites:
            cand_pam = local_seq[local_cutsite + 3 : local_cutsite + 3 + len(pam)]
            if lib.match(pam, cand_pam):
                num_grnas += 1
    assert 1 <= num_grnas <= 80

    # Search for gRNAs matching PAM
    seqs = [seq, lib.revcomp(seq)]
    cutsites = range(30, len(seq) - 30)
    for local_seq, grna_orient in zip(seqs, ['+', '-']):
        for local_cutsite in cutsites:
            cand_pam = local_seq[local_cutsite + 3 : local_cutsite + 3 + len(pam)]
            if lib.match(pam, cand_pam):
                dd['gRNA orientation'].append(grna_orient)
                dd['gRNA'].append(local_seq[local_cutsite - 17 : local_cutsite + 3])
                dd['PAM'].append(cand_pam)
                # Report the cutsite in '+'-strand coordinates.
                if grna_orient == '+':
                    cutsite_plus = local_cutsite
                else:
                    cutsite_plus = len(seq) - local_cutsite
                dd['Cutsite'].append(cutsite_plus)

                # inDelphi predictions and standard statistics
                pred_df, stats = inDelphi.predict(local_seq, local_cutsite, celltype)
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # switch to pd.concat when upgrading.
                all_stats = all_stats.append(stats, ignore_index = True)

                # Detailed link
                sm_link = lib.encode_dna_to_url_path_single(local_seq, local_cutsite, celltype)
                dd['URL'].append('%s' % (sm_link))

                if adv_matchseq_flag or adv_del_flag:
                    # Genotype-resolved rows are needed for these options.
                    stats = pd.DataFrame(stats, index = [0])
                    pred_df = inDelphi.add_mhless_genotypes(pred_df, stats)

                # Handle advanced options
                if adv_matchseq_flag:
                    # Total predicted frequency of repair genotypes matching
                    # the requested sequence.
                    inDelphi.add_genotype_column(pred_df, stats)
                    crit = (pred_df['Genotype'] == adv_matchseq)
                    matched_seq_freq = sum(pred_df[crit]['Predicted frequency'])
                    dd['Repairs to spec.'].append(matched_seq_freq)

                if adv_poi_flag:
                    # Distance from the cut to the position of interest; the
                    # cut falls between two bases, hence the -1 to the right.
                    if adv_poi > cutsite_plus:
                        dist = abs(cutsite_plus - 1 - adv_poi)
                    else:
                        dist = abs(cutsite_plus - adv_poi)
                    dd['Dist. to POI'].append(dist)

                if adv_del_flag:
                    crit = (pred_df['Category'] == 'del')
                    delseq_freq = 0
                    # Map the requested deletion range onto this strand.
                    if grna_orient == '+':
                        adv_delstart_local = adv_delstart
                        adv_delend_local = adv_delend
                    else:
                        adv_delstart_local = len(seq) - adv_delend
                        adv_delend_local = len(seq) - adv_delstart
                    for jdx, row in pred_df[crit].iterrows():
                        mh_len = row['Microhomology length']
                        del_start = local_cutsite - row['Length'] + row['Genotype position']
                        del_end = del_start + row['Length']
                        # A microhomology deletion is ambiguous over mh_len
                        # alignments; count this genotype if any alignment
                        # deletes the entire requested range.
                        contains_deletion = False
                        for mhl in range(int(mh_len) + 1):
                            if del_start - mhl <= adv_delstart_local < adv_delend_local <= del_end - mhl:
                                contains_deletion = True
                        if contains_deletion:
                            delseq_freq += row['Predicted frequency']
                    dd['Deletes spec.'].append(delseq_freq)

    # Add metadata columns and advanced settings
    for col in dd:
        all_stats[col] = dd[col]

    # Switch phi to log phi
    all_stats['MH strength'] = np.log(all_stats['Phi'])
    all_stats = all_stats.drop(['Phi'], axis = 1)

    # Sort by cutsite and relabel indices
    all_stats = all_stats.sort_values(by = 'Cutsite')
    all_stats = all_stats.reset_index(drop = True)
    all_stats['ID'] = all_stats.index + 1
    return all_stats
@app.callback(
  Output('B_hidden-pred-df-stats-signal', 'children'),
  [Input('B_submit_button', 'n_clicks')],
  [State('B_textarea', 'value'),
   State('B_textbox_pam', 'value'),
   State('B_celltype_dropdown', 'value'),
   State('B_adv_matchseq', 'value'),
   State('B_adv_position_of_interest', 'value'),
   State('B_adv_delstart', 'value'),
   State('B_adv_delend', 'value'),
  ])
def update_pred_df_stats(nclicks, seq, pam, celltype, adv_matchseq, adv_poi, adv_delstart, adv_delend):
    """Run (and cache) the batch prediction when Submit is clicked.

    Emits the parameter tuple as a signal so downstream callbacks can
    re-fetch the memoized result by key.
    """
    # Page load / no click yet: abort the callback.
    if nclicks is None or nclicks == 0:
        assert False, 'init'
    params = (seq, pam, celltype, adv_matchseq, adv_poi, adv_delstart, adv_delend)
    # Warm the memoized cache; the return value is only a cache key.
    indelphi_predict_batch_cache(params)
    return params
##
# Module header callbacks, Advanced options hiding/showing
##
@app.callback(
  Output('B_postcomp_module_header', 'children'),
  [Input('B_hidden-pred-df-stats-signal', 'children')],
  [State('B_textarea', 'value'),
   State('B_textbox_pam', 'value')])
def update_postcomp_module_header(signal, seq, pam):
    """Header summarizing how many gRNAs were found in the query."""
    if signal == 'init':
        assert False, 'init'
    # Fetch the memoized result for this parameter signal.
    stats = indelphi_predict_batch_cache(signal)
    n_grnas, n_bp = len(stats), len(seq)
    return 'Results of %s gRNAs with %s PAM found in %s-bp query' % (n_grnas, pam, n_bp)
@app.callback(
  Output('B_advanced_options_body', 'style'),
  [Input('B_advanced_options_header', 'n_clicks'),
   Input('B_url', 'pathname')],
  [State('B_advanced_options_body', 'style')])
def update_adv_options_body_style(n_clicks, url, prev_style):
  """Show or hide the advanced-options body.

  On page load (n_clicks is None), honors the URL's adv flag; afterwards
  each header click toggles visibility by adding/removing 'display: none'.
  NOTE: mutates the incoming State dict in place, as the original did.
  """
  new_style = prev_style
  if n_clicks is None:
    valid_flag, dd = lib.parse_valid_url_path_batch(url)
    if valid_flag and dd['adv_flag'] == True:
      # Bug fix: use pop() with a default. The previous unconditional
      # `del new_style['display']` raised KeyError whenever the initial
      # style dict had no 'display' key (the click branch below guards
      # against this; the URL branch did not).
      new_style.pop('display', None)
  elif n_clicks > 0:  # ignore first automatic click triggered by page load
    if 'display' in prev_style:
      del new_style['display']
    else:
      new_style['display'] = 'none'
  return new_style
@app.callback(
  Output('B_advanced_options_header_text', 'children'),
  [Input('B_advanced_options_header', 'n_clicks')],
  [State('B_advanced_options_header_text', 'children')])
def update_adv_options_header_text(n_clicks, prev_text):
  """Flip the expand/collapse arrow on the advanced-options header."""
  if n_clicks is None:
    assert False, 'init'
  if n_clicks > 0:
    arrow = '▼' if '▶' in prev_text else '▶'
    return f'{arrow} Advanced options'
  # n_clicks == 0: implicitly return None, matching the original behavior.
##
# Column selection and sorting callbacks
##
@app.callback(
  Output('B_dropdown-sortcol', 'options'),
  [Input('B_dropdown-columns', 'value')])
def update_sortcol_options(values):
  """Mirror the currently selected columns as sortable dropdown options."""
  return [{'label': value, 'value': value} for value in values]
@app.callback(
  Output('B_dropdown-sortcol', 'value'),
  [Input('B_dropdown-sortcol', 'options')],
  [State('B_url', 'pathname'),
   State('B_dropdown-sortcol', 'value'),
   State('B_advanced_options_module', 'n_clicks'),
   State('B_row_dropdown-columns', 'n_clicks'),
   State('B_row_dropdown-sortcol', 'n_clicks'),
  ])
def update_sortcol_value_from_url(options, url, prev_value, nc1, nc2, nc3):
  """Initialize the sort column from the URL, unless the user already interacted."""
  if nc1 or nc2 or nc3:
    # The user clicked a module that might change the sortcol: keep theirs.
    return prev_value
  valid_flag, dd = lib.parse_valid_url_path_batch(url)
  if not valid_flag or dd['sort_by'] == '-':
    return prev_value
  # The URL encodes the sort column as an index into the alphabetized options.
  alphabetized = sorted(s['value'] for s in options)
  return alphabetized[int(dd['sort_by'])]
@app.callback(
  Output('B_dropdown-columns', 'options'),
  [Input('B_hidden-pred-df-stats-signal', 'children')],
  [State('B_dropdown-columns', 'options')]
  )
def update_columns_options(signal, prev_options):
  """Offer the optional stat columns only when present in the cached results."""
  if signal == 'init':
    assert False, 'init'
  stats = indelphi_predict_batch_cache(signal)
  # Mutated in place; Dash re-reads this same list on the next firing.
  options = prev_options
  for name in ['Repairs to spec.', 'Deletes spec.', 'Dist. to POI']:
    entry = {'label': name, 'value': name}
    if name in stats.columns:
      if entry not in options:
        options.append(entry)
    elif entry in options:
      options.remove(entry)
  return options
@app.callback(
  Output('B_dropdown-columns', 'value'),
  [Input('B_dropdown-columns', 'options')],
  [State('B_dropdown-columns', 'value'),
   State('B_url', 'pathname'),
   State('B_row_dropdown-columns', 'n_clicks')]
  )
def update_columns_value(options, prev_value, url, n_clicks):
  """Keep the selected columns in sync with the available options.

  Optional stat columns are auto-selected when they become available and
  dropped when they disappear. On first load (no clicks yet), a valid URL
  overrides the selection entirely with its encoded column bitmask.
  NOTE: mutates the incoming `prev_value` State list in place.
  """
  value = prev_value
  all_options = [s['value'] for s in options]
  for td in ['Repairs to spec.', 'Deletes spec.', 'Dist. to POI']:
    if td in all_options:
      if td not in value:
        value.append(td)
    else:
      if td in value:
        value.remove(td)
  # Only apply the URL's selection before any user interaction.
  if n_clicks is None or n_clicks == 0:
    valid_flag, dd = lib.parse_valid_url_path_batch(url)
    if valid_flag:
      value = []
      # 'chosen_columns' is a bitstring indexed over the alphabetized options.
      alphabetical_options = sorted(all_options)
      for idx, flag in enumerate(dd['chosen_columns']):
        if flag == '1':
          value.append(alphabetical_options[idx])
  return value
@app.callback(
  Output('B_sortdirection', 'value'),
  [Input('B_dropdown-sortcol', 'options')],
  [State('B_url', 'pathname'),
   State('B_sortdirection', 'value')])
def update_sortdir_from_url(sort_options, url, prev_value):
  """Take the sort direction from the URL when it parses; otherwise keep it."""
  valid_flag, dd = lib.parse_valid_url_path_batch(url)
  return dd['sort_dir'] if valid_flag else prev_value
##
# Stats table callbacks
##
@cache.memoize(timeout = cache_timeout)
def make_table_stats_cache(parameters):
  """Build the display-ready stats table for a JSON-encoded parameter tuple.

  `parameters` is json.dumps((signal, chosen_columns, sort_col,
  sort_direction)); using the serialized string as the memoization key
  keeps the cache lookup hashable and deterministic.
  """
  parameters = json.loads(parameters)
  signal, chosen_columns, sort_col, sort_direction = parameters
  stats = indelphi_predict_batch_cache(signal)
  # Drop extra cols not shown in the table
  drop_cols = [
    'Reference sequence',
    '1-bp ins frequency',
    'MH del frequency',
    'MHless del frequency',
  ]
  stats = stats.drop(drop_cols, axis = 1)
  # Rename to shorter versions
  stats = lib.rename_batch_columns(stats)
  # Sort by, if possible
  if sort_col is not None and sort_direction is not None:
    if sort_direction == 'Ascending':
      ascending_flag = True
    else:
      ascending_flag = False
    stats = stats.sort_values(by = sort_col, ascending = ascending_flag)
  # Reformat floats: everything except the identifying columns is numeric.
  stats_cols = list(stats.columns)
  nonstat_cols = ['ID', 'gRNA', 'gRNA orientation', 'PAM', 'URL', 'Celltype']
  for nonstat_col in nonstat_cols:
    stats_cols.remove(nonstat_col)
  for stat_col in stats_cols:
    # Filter down to selected columns
    if stat_col not in chosen_columns:
      stats.drop(stat_col, axis = 1, inplace = True)
      continue
    # Reformat: 2 decimals for scores, 1 decimal for percentages.
    if stat_col in ['Precision', 'MH strength']:
      stats[stat_col] = [float('%.2f' % (s)) for s in stats[stat_col]]
    else:
      stats[stat_col] = [float('%.1f' % (s)) for s in stats[stat_col]]
  # Reorder columns into the canonical display order.
  stats = stats[nonstat_cols + lib.order_chosen_columns(chosen_columns)]
  return stats
@app.callback(
  Output('B_table-stats-signal', 'children'),
  [Input('B_hidden-pred-df-stats-signal', 'children'),
   Input('B_dropdown-columns', 'value'),
   Input('B_dropdown-sortcol', 'value'),
   Input('B_sortdirection', 'value'),
  ])
def update_stats_table(signal, chosen_columns, sort_col, sort_direction):
  """Serialize the table parameters, warm the table cache, emit the signal."""
  if signal == 'init':
    assert False, 'init'
  table_params = json.dumps((signal, chosen_columns, sort_col, sort_direction))
  make_table_stats_cache(table_params)
  return table_params
@app.callback(
Output('B_table-stats', 'selected_row_indices'),
[Input('B_hidden-clickData', 'children'),
Input('B_hidden-cache-submit-button', 'children'),
Input('B_dropdown-columns', 'value'),
Input('B_dropdown-sortcol', 'value'),
Input('B_table-stats-signal', 'children')],
[State('B_table-stats', 'selected_row_indices'),
State('B_hidden-sort-module-interaction', 'children'),
State('B_hidden-selected-id', 'children'),
State('B_url', 'pathname'),
State('B_postcomputation_settings', 'n_clicks'),
State('B_plot-stats-div', 'n_clicks'),
State('B_submit_button', 'n_clicks'),
])
def update_statstable_selected(clickData, submit_time, col_values, sortcol_value, table_signal, selected_row_indices, sort_time, prev_id, url, nc1, nc2, nc_submit):
if not bool(nc1 or nc2) and nc_submit == 1:
| |
{
'name': snapshot_name,
'volume': volume.to_dict()
}
if snapmirror_label:
if print_output:
print("Setting snapmirror label as:"+snapmirror_label)
snapshotDict['snapmirror_label'] = snapmirror_label
# Create snapshot
snapshot = NetAppSnapshot.from_dict(snapshotDict)
snapshot.post(poll=True, poll_timeout=120)
if print_output:
print("Snapshot created successfully.")
except NetAppRestError as err:
if print_output:
print("Error: ONTAP Rest API Error: ", err)
raise APIConnectionError(err)
#delete snapshots exceeding retention count if provided
retention_count = int(retention_count)
if retention_count > 0:
try:
# Retrieve all source snapshot from last to 1st
# Retrieve volume
volume = NetAppVolume.find(name=volume_name, svm=svm)
if not volume:
if print_output:
print("Error: Invalid volume name.")
raise InvalidVolumeParameterError("name")
if retention_days:
retention_date = datetime.datetime.today() - datetime.timedelta(days=retention_count)
last_snapshot_list = []
snapshot_list = []
for snapshot in NetAppSnapshot.get_collection(volume.uuid):
snapshot.get()
if snapshot.name.startswith(snapshot_name_original+'.'):
if not retention_days:
snapshot_list.append(snapshot.name)
last_snapshot_list.append(snapshot.name)
if len(last_snapshot_list) > retention_count:
last_snapshot_list.pop(0)
else:
rx = r'^{0}\.(.+)$'.format(snapshot_name_original)
matchObj = re.match(rx,snapshot.name)
if matchObj:
snapshot_date = matchObj.group(1)
snapshot_date_obj = datetime.datetime.strptime(snapshot_date, "%Y-%m-%d_%H%M%S")
snapshot_list.append(snapshot.name)
last_snapshot_list.append(snapshot.name)
if snapshot_date_obj < retention_date:
last_snapshot_list.pop(0)
#delete snapshots not in retention
for snap in snapshot_list:
if snap not in last_snapshot_list:
delete_snapshot(volume_name=volume_name, svm_name = svm, snapshot_name=snap, skip_owned=True, print_output=True)
except NetAppRestError as err:
if print_output:
print("Error: ONTAP Rest API Error: ", err)
raise APIConnectionError(err)
else:
raise ConnectionTypeError()
def create_volume(volume_name: str, volume_size: str, guarantee_space: bool = False, cluster_name: str = None, svm_name: str = None,
                  volume_type: str = "flexvol", unix_permissions: str = "0777",
                  unix_uid: str = "0", unix_gid: str = "0", export_policy: str = "default",
                  snapshot_policy: str = None, aggregate: str = None, mountpoint: str = None, junction: str = None, readonly: bool = False,
                  print_output: bool = False, tiering_policy: str = None, vol_dp: bool = False):
    """Create a new volume on the ONTAP system named in the config file.

    Args:
        volume_name: name of the volume to create.
        volume_size: size as '<int>MB', '<int>GB', or '<int>TB'.
        guarantee_space: if True, use a 'volume' space guarantee.
        cluster_name: optional cluster hostname override.
        svm_name: optional SVM override (defaults to config).
        volume_type: 'flexvol' or 'flexgroup'.
        unix_permissions/unix_uid/unix_gid/export_policy/snapshot_policy/
        aggregate: NAS settings; fall back to config defaults when falsy.
        mountpoint: if given, locally mount the new volume afterwards.
        junction: junction path (defaults to '/<volume_name>').
        readonly: mount read-only.
        print_output: print progress/errors to stdout.
        tiering_policy: one of none/auto/snapshot-only/all.
        vol_dp: create a data-protection (type 'dp') volume without NAS attrs.

    Raises:
        InvalidConfigError, InvalidVolumeParameterError, APIConnectionError,
        ConnectionTypeError, MountOperationError.
    """
    # Retrieve config details from config file
    try:
        config = _retrieve_config(print_output=print_output)
    except InvalidConfigError:
        raise
    try:
        connectionType = config["connectionType"]
    except (KeyError, TypeError):
        if print_output:
            _print_invalid_config_error()
        raise InvalidConfigError()
    if cluster_name:
        config["hostname"] = cluster_name
    # Guard clause: only ONTAP connections are supported.
    if connectionType != "ONTAP":
        raise ConnectionTypeError()
    # Instantiate connection to ONTAP cluster
    try:
        _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output)
    except InvalidConfigError:
        raise
    # Retrieve values from config file if not passed into function
    try:
        svm = config["svm"]
        if svm_name:
            svm = svm_name
        if not volume_type:
            volume_type = config["defaultVolumeType"]
        if not unix_permissions:
            unix_permissions = config["defaultUnixPermissions"]
        if not unix_uid:
            unix_uid = config["defaultUnixUID"]
        if not unix_gid:
            unix_gid = config["defaultUnixGID"]
        if not export_policy:
            export_policy = config["defaultExportPolicy"]
        if not snapshot_policy:
            snapshot_policy = config["defaultSnapshotPolicy"]
        if not aggregate and volume_type == 'flexvol':
            aggregate = config["defaultAggregate"]
    except (KeyError, TypeError):
        if print_output:
            _print_invalid_config_error()
        raise InvalidConfigError()
    # Check volume type for validity
    if volume_type not in ("flexvol", "flexgroup"):
        if print_output:
            print("Error: Invalid volume type specified. Acceptable values are 'flexvol' and 'flexgroup'.")
        # Bug fix: previously raised with "size" (copy-paste); name the
        # parameter that is actually invalid.
        raise InvalidVolumeParameterError("volumeType")
    # Check unix permissions for validity. fullmatch closes a hole in the
    # old `re.search("^0[0-7]{3}", ...)`, which accepted e.g. '07779'.
    if not re.fullmatch("0[0-7]{3}", unix_permissions):
        if print_output:
            print("Error: Invalid unix permissions specified. Acceptable values are '0777', '0755', '0744', etc.")
        raise InvalidVolumeParameterError("unixPermissions")
    # Check unix uid for validity
    try:
        unix_uid = int(unix_uid)
    except (ValueError, TypeError):
        if print_output:
            print("Error: Invalid unix uid specified. Value must be an integer. Example: '0' for root user.")
        raise InvalidVolumeParameterError("unixUID")
    # Check unix gid for validity
    try:
        unix_gid = int(unix_gid)
    except (ValueError, TypeError):
        if print_output:
            print("Error: Invalid unix gid specified. Value must be an integer. Example: '0' for root group.")
        raise InvalidVolumeParameterError("unixGID")
    # Convert volume size to Bytes (one pattern instead of three branches).
    size_match = re.fullmatch("([0-9]+)(MB|GB|TB)", volume_size)
    if not size_match:
        if print_output:
            print("Error: Invalid volume size specified. Acceptable values are '1024MB', '100GB', '10TB', etc.")
        raise InvalidVolumeParameterError("size")
    size_multipliers = {"MB": 1024**2, "GB": 1024**3, "TB": 1024**4}
    volumeSizeBytes = int(size_match.group(1)) * size_multipliers[size_match.group(2)]
    # Default the junction path (the old `junction = junction` was a no-op).
    if not junction:
        junction = "/" + volume_name
    # Check tiering policy
    if tiering_policy not in ('none', 'auto', 'snapshot-only', 'all', None):
        if print_output:
            print("Error: tiering policy can be: none,auto,snapshot-only or all")
        raise InvalidVolumeParameterError("tieringPolicy")
    # Create dict representing volume; DP volumes omit NAS attributes.
    volumeDict = {
        "name": volume_name,
        "comment": "netapp-dataops",
        "svm": {"name": svm},
        "size": volumeSizeBytes,
        "style": volume_type,
    }
    if vol_dp:
        volumeDict["type"] = 'dp'
    else:
        volumeDict["nas"] = {
            "path": junction,
            "export_policy": {"name": export_policy},
            "security_style": "unix",
            "unix_permissions": unix_permissions,
            "uid": unix_uid,
            "gid": unix_gid
        }
        volumeDict["snapshot_policy"] = {"name": snapshot_policy}
    # Set space guarantee field
    volumeDict["guarantee"] = {"type": "volume" if guarantee_space else "none"}
    # If flexvol -> single aggregate; flexgroup -> optional comma-separated list
    if volume_type == "flexvol":
        volumeDict["aggregates"] = [{'name': aggregate}]
    elif aggregate:
        volumeDict["aggregates"] = [{'name': aggr} for aggr in aggregate.split(',')]
    # If tiering policy provided
    if tiering_policy:
        volumeDict['tiering'] = {'policy': tiering_policy}
    # Create volume
    if print_output:
        print("Creating volume '" + volume_name + "' on svm '" + svm + "'")
    try:
        volume = NetAppVolume.from_dict(volumeDict)
        volume.post(poll=True, poll_timeout=120)
        if print_output:
            print("Volume created successfully.")
    except NetAppRestError as err:
        if print_output:
            print("Error: ONTAP Rest API Error: ", err)
        raise APIConnectionError(err)
    # Optionally mount newly created volume
    if mountpoint:
        try:
            mount_volume(volume_name=volume_name, svm_name=svm, mountpoint=mountpoint, readonly=readonly, print_output=True)
        except (InvalidConfigError, APIConnectionError, InvalidVolumeParameterError, MountOperationError):
            if print_output:
                print("Error: Error mounting volume.")
            raise
def delete_snapshot(volume_name: str, snapshot_name: str, cluster_name: str = None, svm_name: str = None, skip_owned: bool = False, print_output: bool = False):
    """Delete a snapshot from a volume on the configured ONTAP system.

    When the snapshot has owners, it is not deleted: an error is raised
    unless `skip_owned` is set, in which case a warning is printed instead.
    """
    # Retrieve config details from config file
    try:
        config = _retrieve_config(print_output=print_output)
    except InvalidConfigError:
        raise
    try:
        connectionType = config["connectionType"]
    except:
        if print_output:
            _print_invalid_config_error()
        raise InvalidConfigError()
    if cluster_name:
        config["hostname"] = cluster_name
    # Guard clause: only ONTAP connections are supported.
    if connectionType != "ONTAP":
        raise ConnectionTypeError()
    # Instantiate connection to ONTAP cluster
    try:
        _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output)
    except InvalidConfigError:
        raise
    # SVM comes from the config unless explicitly overridden.
    try:
        svm = config["svm"]
    except:
        if print_output:
            _print_invalid_config_error()
        raise InvalidConfigError()
    if svm_name:
        svm = svm_name
    if print_output:
        print(f"Deleting snapshot '{snapshot_name}'.")
    try:
        # Retrieve volume
        volume = NetAppVolume.find(name=volume_name, svm=svm)
        if not volume:
            if print_output:
                print("Error: Invalid volume name.")
            raise InvalidVolumeParameterError("name")
        # Retrieve snapshot
        snapshot = NetAppSnapshot.find(volume.uuid, name=snapshot_name)
        if not snapshot:
            if print_output:
                print("Error: Invalid snapshot name.")
            raise InvalidSnapshotParameterError("name")
        if hasattr(snapshot, 'owners'):
            if not skip_owned:
                if print_output:
                    print('Error: Snapshot cannot be deleted since it has owners:' + ','.join(snapshot.owners))
                raise InvalidSnapshotParameterError("name")
            if print_output:
                print('Warning: Snapshot cannot be deleted since it has owners:' + ','.join(snapshot.owners))
            return
        # Delete snapshot
        snapshot.delete(poll=True, poll_timeout=120)
        if print_output:
            print("Snapshot deleted successfully.")
    except NetAppRestError as err:
        if print_output:
            print("Error: ONTAP Rest API Error: ", err)
        raise APIConnectionError(err)
def delete_volume(volume_name: str, cluster_name: str = None, svm_name: str = None, delete_mirror: bool = False,
delete_non_clone: bool = False, print_output: bool = False):
# Retrieve config details from config file
try:
config = _retrieve_config(print_output=print_output)
except InvalidConfigError:
raise
try:
connectionType = config["connectionType"]
except:
if print_output:
_print_invalid_config_error()
raise InvalidConfigError()
if cluster_name:
config["hostname"] = cluster_name
if connectionType == "ONTAP":
# Instantiate connection to ONTAP cluster
try:
_instantiate_connection(config=config, connectionType=connectionType, print_output=print_output)
except InvalidConfigError:
raise
# Retrieve svm from config file
try:
svm = config["svm"]
if svm_name:
svm = svm_name
except:
if print_output :
_print_invalid_config_error()
raise InvalidConfigError()
try:
# Retrieve volume
volume = NetAppVolume.find(name=volume_name, svm=svm)
if not volume:
if print_output:
print("Error: Invalid volume name.")
raise InvalidVolumeParameterError("name")
if not "CLONENAME:" in volume.comment and not delete_non_clone:
if print_output:
print("Error: volume is not a clone created by this tool. add --delete-non-clone to delete it")
raise InvalidVolumeParameterError("delete-non-clone")
except NetAppRestError as err:
if print_output:
print("Error: ONTAP Rest API Error: ", err)
raise APIConnectionError(err)
if delete_mirror:
#check if this volume has snapmirror destination relationship
uuid = None
try:
snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(**{"destination.path": svm+":"+volume_name})
for rel in snapmirror_relationship:
# Retrieve relationship details
rel.get()
uuid = rel.uuid
except NetAppRestError as err:
if print_output:
print("Error: ONTAP Rest API Error: ", err)
if uuid:
if print_output:
print("Deleting snapmirror relationship: "+svm+":"+volume_name)
try:
deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid)
deleteRelation.delete(poll=True, poll_timeout=120)
except NetAppRestError as err:
if print_output:
print("Error: ONTAP Rest API Error: ", err)
#check if this volume has snapmirror destination relationship
uuid = None
try:
snapmirror_relationship = NetAppSnapmirrorRelationship.get_collection(list_destinations_only=True,**{"source.path": svm+":"+volume_name})
for rel in snapmirror_relationship:
# Retrieve relationship details
rel.get(list_destinations_only=True)
uuid = rel.uuid
if print_output:
print("release relationship: "+rel.source.path+" -> "+rel.destination.path)
deleteRelation = NetAppSnapmirrorRelationship(uuid=uuid)
deleteRelation.delete(poll=True, poll_timeout=120,source_only=True)
except NetAppRestError as err:
if print_output:
| |
import pandas as pd
import numpy as np
import logging
import os
import geojson
import math
import itertools
import geopandas as gpd
from geopy.geocoders import Nominatim
from shapely.geometry import Point, Polygon, MultiPolygon, shape
import shapely.ops
from pyproj import Proj
from bs4 import BeautifulSoup
import requests
from abc import ABC, abstractmethod
from .geolocations import *
from .visualizations import *
logging.basicConfig(level=logging.WARNING)
class OpinionNetworkModel(ABC):
""" Abstract base class for network model """
def __init__(self,
probabilities = [.45,.1,.45],
power_law_exponent = 1.5,
openness_to_neighbors = 1.5,
openness_to_influencers = 1.5,
distance_scaling_factor = 1/10,
importance_of_weight = 1.6,
importance_of_distance = 8.5,
include_opinion = True,
include_weight = True,
left_reach = 0.8,
right_reach = 0.8,
threshold = -1
):
"""Returns initialized OpinionNetworkModel.
Inputs:
probabilities: (list) probabilities of each mode; these are the
values "p_0,p_1,p_2" from [1].
power_law_exponent: (float) exponent of power law, must be > 0;
this is "gamma" from [1].
openness_to_neighbors: (float) maximum inter-mode distance that agents
can influence; this is "b" from [1].
openness_to_influencers: (float) distance in opinion space that
mega-influencers can reach; this is "epsilon" from [1].
distance_scaling_factor: (float) Scale distancy by this amount, must
be >0; this is "lambda" from [1].
importance_of_weight: (float) Raise weights to this power, must be > 0;
this is "alpha" from [1].
importance_of_distance: (float) Raise adjusted distance to this power,
must be > 0; this is "delta" from [1].
include_opinion: (boolean) If True, include distance in opinion space
in the probability measure.
include_weight: (boolean) If True, include influencer weight in the
probability measure.
left_reach: (float) this is the proportion of the susceptible population
that the left mega-influencers will actually reach, must be between
0 and 1; this is p_L from [1]
right_reach: (float) this is the proportion of the susceptible population
that the right mega-influencers will actually reach, must be between
0 and 1; this is p_R from [1]
threshold: (int) value below which opinions no longer change.
Outputs:
Fully initialized OpinionNetwork instance.
"""
self.probabilities = probabilities
self.power_law_exponent = power_law_exponent
self.openness_to_neighbors = openness_to_neighbors
self.openness_to_influencers = openness_to_influencers
self.distance_scaling_factor = distance_scaling_factor
self.importance_of_weight = importance_of_weight
self.importance_of_distance = importance_of_distance
self.include_opinion = include_opinion
self.include_weight = include_weight
self.left_reach = left_reach
self.right_reach = right_reach
self.threshold = threshold
self.agent_df = None
self.belief_df = None
self.prob_df = None
self.adjacency_df = None
self.mega_influencer_df = None
self.clustering_coefficient = 0
self.mean_degree = 0
    def populate_model(self, num_agents = None, geo_df = None, bounding_box = None, show_plot = False):
        """ Fully initialized but untrained OpinionNetworkModel instance.

        Places agents, assigns weights/beliefs, computes the influence
        probability array and adjacency, connects mega-influencers, and
        stores everything (plus network statistics) on the instance.

        Input:
            num_agents: (int) number of agents to plot.
            geo_df: (dataframe) geographic dataframe including county geometry.
            bounding_box: (list) list of 4 vertices determining a bounding box
                where agents are to be added. If no box is given, agents are added
                to a random triangle.
            show_plot: (bool) if true then plot is shown.

        Output:
            None (the model instance is populated in place).
        """
        # Without a bounding box, agents go into a single default triangle;
        # with one, the county geometry is triangulated and filled by density.
        if bounding_box is None:
            agent_df = self.add_random_agents_to_triangle(num_agents = num_agents,
                                        geo_df = geo_df,
                                        show_plot = False)
        else:
            if geo_df is None:
                raise ValueError("If a bounding box is specified, then a "
                    "geo_df must also be given.")
            agent_df = self.add_random_agents_to_triangles(geo_df = geo_df,
                                        bounding_box = bounding_box,
                                        show_plot = False)
        logging.info("\n {} agents added.".format(agent_df.shape[0]))
        belief_df = self.assign_weights_and_beliefs(agent_df)
        logging.info("\n Weights and beliefs assigned.")
        prob_df = self.compute_probability_array(belief_df)
        adjacency_df = self.compute_adjacency(prob_df)
        logging.info("\n Adjacencies computed.")
        # Connect mega-influencers
        mega_influencer_df = self.connect_mega_influencers(belief_df)
        # Compute network statistics.
        logging.info("\n Computing network statistics...")
        cc, md = self.compute_network_stats(adjacency_df)
        logging.info("\n Clustering Coefficient: {}".format(cc))
        logging.info("\n Mean Degree: {}".format(md))
        # Cache all intermediate artifacts on the instance.
        self.agent_df = agent_df
        self.belief_df = belief_df
        self.prob_df = prob_df
        self.adjacency_df = adjacency_df
        self.mega_influencer_df = mega_influencer_df
        self.clustering_coefficient = cc
        self.mean_degree = md
        if show_plot == True:
            self.plot_initial_network()
        return None
    def plot_initial_network(self):
        """Plot the populated network (delegates to plot_network)."""
        plot_network(self)
        return None
def add_random_agents_to_triangle(self, num_agents, geo_df = None, triangle_object = None,
show_plot = False):
""" Assign N points on a triangle using Poisson point process.
Input:
num_agents: (int) number of agents to add to the triangle. If None,
then agents are added according to density.
geo_df: (dataframe) geographic datatframe including county geometry.
triangle_object: (Polygon) bounded triangular region to be populated.
show_plot: (bool) if true then plot is shown.
Returns:
An num_agents x 2 dataframe of point coordinates.
"""
if triangle_object is None:
# If no triangle is given, initialize triangle with area 1 km^2.
triangle_object = Polygon([[0,0],[1419,0], [1419/2,1419],[0,0]])
# If density is specified, adjust triangle size.
if geo_df is not None:
density = geo_df.loc[0,"density"]
b = 1419 * (num_agents/density) ** (1/2)
triangle_object = Polygon([[0,0],[b,0], [b/2,b],[0,0]])
bnd = list(triangle_object.boundary.coords)
gdf = gpd.GeoDataFrame(geometry = [triangle_object])
# Establish initial CRS
gdf.crs = "EPSG:3857"
# Set CRS to lat/lon
gdf = gdf.to_crs(epsg=4326)
# Extract coordinates
co = list(gdf.loc[0,"geometry"].exterior.coords)
lon, lat = zip(*co)
pa = Proj(
"+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
x, y = pa(lon, lat)
coord_proj = {"type": "Polygon", "coordinates": [zip(x, y)]}
area = shape(coord_proj).area / (10 ** 6) # area in km^2
# Get Vertices
V1 = np.array(bnd[0])
V2 = np.array(bnd[1])
V3 = np.array(bnd[2])
# Sample from uniform distribution on [0,1]
U = np.random.uniform(0,1,num_agents)
V = np.random.uniform(0,1,num_agents)
UU = np.where(U + V > 1, 1-U, U)
VV = np.where(U + V > 1, 1-V, V)
# Shift triangle into origin and and place points.
agents = (UU.reshape(len(UU),-1) * (V2 - V1).reshape(-1,2)) + (
VV.reshape(len(VV),-1) * (V3 - V1).reshape(-1,2))
# Shift points back to original position.
agents = agents + V1.reshape(-1,2)
agent_df = pd.DataFrame(agents, columns = ["x","y"])
if show_plot == True:
plot_agents_on_triangle(triangle_object, agent_df)
return agent_df
    def add_random_agents_to_triangles(self, geo_df, bounding_box = None, show_plot = False):
        """ Plots county with triangular regions.

        Inputs:
            geo_df: (dataframe) geographic dataframe including county geometry.
            bounding_box: (list) list of 4 vertices determining a bounding box
                where agents are to be added. If no box is given, then the
                bounding box is taken as the envelope of the county.
            show_plot: (bool) if true then plot is shown.

        Returns:
            Populated triangles in specified county enclosed in the given
            bounding box where regions are filled with proper density using a Poisson
            point process.
        """
        tri_dict = make_triangulation(geo_df)
        tri_df = gpd.GeoDataFrame({"geometry":[Polygon(t) for t in tri_dict["geometry"]["coordinates"]]})
        # Establish initial CRS
        # NOTE(review): assumes the triangulation's native CRS is Web
        # Mercator (EPSG:3857) -- confirm against make_triangulation.
        tri_df.crs = "EPSG:3857"
        # Set CRS to lat/lon.
        tri_df = tri_df.to_crs(epsg=4326)
        # Get triangles within bounding box.
        if bounding_box is None:
            geo_df.crs = "EPSG:3857"
            geo_df = geo_df.to_crs(epsg=4326)
            sq_df = gpd.GeoDataFrame(geo_df["geometry"])
        else:
            sq_df = gpd.GeoDataFrame({"geometry":[Polygon(bounding_box)]})
        # Keep only triangles fully contained in the bounding region.
        inset = [i for i in tri_df.index if tri_df.loc[i,"geometry"].within(sq_df.loc[0,"geometry"])]
        # Load triangle area.
        agent_df = pd.DataFrame()
        for i in inset:
            # Reproject each triangle to an equal-area projection to get its
            # area in km^2, then fill it with agents at the county density.
            co = list(tri_df.loc[i,"geometry"].exterior.coords)
            lon, lat = zip(*co)
            pa = Proj(
                "+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
            x, y = pa(lon, lat)
            coord_proj = {"type": "Polygon", "coordinates": [zip(x, y)]}
            area = shape(coord_proj).area / (10 ** 6) # area in km^2
            num_agents = int(area * geo_df.loc[0,"density"])
            df = pd.DataFrame(columns = ["x","y"])
            if num_agents > 0:
                df = self.add_random_agents_to_triangle(num_agents,
                                    geo_df = geo_df,
                                    triangle_object = tri_df.loc[i,"geometry"],
                                    show_plot = False)
            agent_df = pd.concat([agent_df,df])
        agent_df.reset_index(drop = True, inplace = True)
        # Plot triangles.
        if show_plot == True:
            fig, ax = plt.subplots(figsize = (10,10))
            tri_df.loc[inset,:].boundary.plot(ax = ax, alpha=1,
                                    linewidth = 3,
                                    edgecolor = COLORS["light_blue"])
            ax.scatter(agent_df["x"], agent_df["y"], s = 3)
            ax.set_axis_off()
            ax.set_aspect(.9)
            plt.show()
        return agent_df
def assign_weights_and_beliefs(self, agent_df, show_plot = False):
""" Assign weights and beliefs (i.e. modes) accoring to probabilities.
Inputs:
agent_df: (dataframe) xy-coordinates for agents.
show_plot: (bool) if true then plot is shown.
Returns:
Dataframe with xy-coordinates, beliefs, and weights for each point.
"""
belief_df = agent_df.copy()
power_law_exponent = self.power_law_exponent
k = -1/(power_law_exponent)
modes = [i for i in range(len(self.probabilities))]
assert np.sum(np.array(self.probabilities)) == 1, "Probabilities must sum to 1."
belief_df["weight"] = np.random.uniform(0,1,belief_df.shape[0]) ** (k)
belief_df["belief"] = np.random.choice(modes, belief_df.shape[0],
p = self.probabilities)
belief_df["decile"] = pd.qcut(belief_df["weight"], q = 100, labels = [
i for i in range(1,101)])
if show_plot == True:
plot_agents_with_belief_and_weight(belief_df)
return belief_df
def compute_probability_array(self, belief_df):
""" Return dataframe of probability that row n influences column m.
Inputs:
| |
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.UpdateService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPStrategyApisStatus(args, parsed_globals):
    """Invoke the APIGateway DescribeIPStrategyApisStatus action and print the result."""
    run_params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        run_params[OptionsDefine.SecretId], run_params[OptionsDefine.SecretKey], run_params[OptionsDefine.Token]
    )
    timeout = run_params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=run_params[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[run_params[OptionsDefine.Version]]
    client = sdk_module.ApigatewayClient(cred, run_params[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[run_params[OptionsDefine.Version]].DescribeIPStrategyApisStatusRequest()
    request.from_json_string(json.dumps(args))
    response = client.DescribeIPStrategyApisStatus(request)
    serialized = response.to_json_string()
    try:
        json_obj = json.loads(serialized)
    except TypeError:
        # Some interpreters return bytes from to_json_string() (python3.3).
        json_obj = json.loads(serialized.decode('utf-8'))
    FormatOutput.output("action", json_obj, run_params[OptionsDefine.Output], run_params[OptionsDefine.Filter])
def doUnReleaseService(args, parsed_globals):
    """Invoke the APIGateway UnReleaseService action and print the result."""
    run_params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        run_params[OptionsDefine.SecretId], run_params[OptionsDefine.SecretKey], run_params[OptionsDefine.Token]
    )
    timeout = run_params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=run_params[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[run_params[OptionsDefine.Version]]
    client = sdk_module.ApigatewayClient(cred, run_params[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[run_params[OptionsDefine.Version]].UnReleaseServiceRequest()
    request.from_json_string(json.dumps(args))
    response = client.UnReleaseService(request)
    serialized = response.to_json_string()
    try:
        json_obj = json.loads(serialized)
    except TypeError:
        # Some interpreters return bytes from to_json_string() (python3.3).
        json_obj = json.loads(serialized.decode('utf-8'))
    FormatOutput.output("action", json_obj, run_params[OptionsDefine.Output], run_params[OptionsDefine.Filter])
def doModifyApiIncrement(args, parsed_globals):
    """Invoke the APIGateway ModifyApiIncrement action and print the result."""
    run_params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        run_params[OptionsDefine.SecretId], run_params[OptionsDefine.SecretKey], run_params[OptionsDefine.Token]
    )
    timeout = run_params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=run_params[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[run_params[OptionsDefine.Version]]
    client = sdk_module.ApigatewayClient(cred, run_params[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[run_params[OptionsDefine.Version]].ModifyApiIncrementRequest()
    request.from_json_string(json.dumps(args))
    response = client.ModifyApiIncrement(request)
    serialized = response.to_json_string()
    try:
        json_obj = json.loads(serialized)
    except TypeError:
        # Some interpreters return bytes from to_json_string() (python3.3).
        json_obj = json.loads(serialized.decode('utf-8'))
    FormatOutput.output("action", json_obj, run_params[OptionsDefine.Output], run_params[OptionsDefine.Filter])
def doDescribeServiceEnvironmentReleaseHistory(args, parsed_globals):
    """Invoke the ApiGateway DescribeServiceEnvironmentReleaseHistory action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeServiceEnvironmentReleaseHistoryRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeServiceEnvironmentReleaseHistory(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiUsagePlan(args, parsed_globals):
    """Invoke the ApiGateway DescribeApiUsagePlan action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeApiUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeApiUsagePlan(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteApi(args, parsed_globals):
    """Invoke the ApiGateway DeleteApi action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteApiRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteApi(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAPIDocs(args, parsed_globals):
    """Invoke the ApiGateway DescribeAPIDocs action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAPIDocsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeAPIDocs(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPStrategysStatus(args, parsed_globals):
    """Invoke the ApiGateway DescribeIPStrategysStatus action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeIPStrategysStatusRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeIPStrategysStatus(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceUsagePlan(args, parsed_globals):
    """Invoke the ApiGateway DescribeServiceUsagePlan action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeServiceUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeServiceUsagePlan(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyServiceEnvironmentStrategy(args, parsed_globals):
    """Invoke the ApiGateway ModifyServiceEnvironmentStrategy action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyServiceEnvironmentStrategyRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyServiceEnvironmentStrategy(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateUsagePlan(args, parsed_globals):
    """Invoke the ApiGateway CreateUsagePlan action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateUsagePlan(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteApiKey(args, parsed_globals):
    """Invoke the ApiGateway DeleteApiKey action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteApiKeyRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteApiKey(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyService(args, parsed_globals):
    """Invoke the ApiGateway ModifyService action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyServiceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyService(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateApiKey(args, parsed_globals):
    """Invoke the ApiGateway UpdateApiKey action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.UpdateApiKeyRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.UpdateApiKey(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyUsagePlan(args, parsed_globals):
    """Invoke the ApiGateway ModifyUsagePlan action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyUsagePlan(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindEnvironment(args, parsed_globals):
    """Invoke the ApiGateway BindEnvironment action and print the response.

    Args:
        args: dict with the action-specific request parameters.
        parsed_globals: parsed global CLI options (credentials, region, output, ...).
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        # default to a 60 s request timeout unless one was given on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.BindEnvironmentRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.BindEnvironment(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:  # py3.3: to_json_string may return bytes; unused binding removed
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAPIDoc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyAPIDocRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyAPIDoc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], | |
indcs
else:
return None, None
def get_indices_for_n_batches(self, number_of_batches, batch_size=None):
"""Predetermine the buffer indices for sampling a number of batches.
The buffer indices are returned and can be given to get_batch() to get those specific
experience
Args:
number_of_batches: int, required, number of batches to return indices for
batch_size: int, optional, the number of experiences per batch. If not specified the
given during initialization is used.
Returns: numpy array of batches * batch_size with the indices
"""
batch_size = batch_size or self._properties['batch_size']
if number_of_batches > 0:
indices = np.empty((number_of_batches, batch_size), dtype=np.int32)
indices.fill(np.nan)
for bi in range(number_of_batches):
idcs = self.sample_policy.sample_indices(batch_size)
if idcs is not None:
indices[bi] = idcs
return indices
def update_experience_meta_data(self, indices, data):
"""Update the metadata (learn data) for the experiences of the given indices.
Args:
indices: list, buffer indices of the experiences for which the data is provided. Note
that get_batch gives the indices of the experiences in the batch
data: dict, containing (some of) the fields specified in learn data during init and
the values of those fields corresponding to the experiences with the provided indices.
"""
for cat in data:
self._call_meta_data_change_listeners(category=cat, indices=indices, pre=True)
self._buffer['experience_meta_data'][cat][indices] = data[cat]
self._call_meta_data_change_listeners(category=cat, indices=indices)
def feed_dict_from_observation(self, observation):
"""Return a feed dict with the internal placeholders and the given observation
Args:
observation: observation dict with numpy observation (no batch dimension)
Returns: the feed dict, observations are expanded to batch dimension 1
"""
feed_dict = {}
meta_data = self._properties['experience_properties']['observations']
for mod in observation:
mod_meta_data = meta_data[mod]
data = np.expand_dims(observation[mod], axis=0)
feed_dict[
self._placeholders['observations'][mod]] = \
ExperienceBuffer.optionally_normalize(data, mod_meta_data)
return feed_dict
@staticmethod
def optionally_normalize(data, meta_data):
if 'norm_dev' in meta_data:
data = data.astype(np.float32)
data /= meta_data['norm_dev']
if 'norm_add' in meta_data:
data += meta_data['norm_add']
return data
def save_to_disk(self, file_path):
"""Saves the contents of the buffer (experiences only) to a specified directory.
Args:
file_path: directory path, file name buffer.npz is appended by the function.
"""
file_path = path.expanduser(file_path)
makedirs(file_path, exist_ok=True)
filename = file_path + 'buffer.npz'
flat_buffer = self._flatten_dict(self._buffer)
for key, npar in flat_buffer.items():
flat_buffer[key] = npar[0:self._buffer_metadata['last_write_index']]
np.savez_compressed(filename, **flat_buffer)
def load_buffer_from_disk(self, file_path):
"""Loads the experiences from a previously saved buffer into this one.
Caution: this function assumes the current buffer is empty and overwrites it. Only
experiences and learn data are loaded, no metadata.
Args:
file_path: directory in which a file 'buffer.npz' is saved.
"""
bufferfile_name = path.expanduser(file_path) + 'buffer.npz'
try:
with np.load(bufferfile_name) as external_flat_buffer:
added_experiences = self._process_flat_buffer_file(external_flat_buffer)
self._buffer_metadata['last_write_index'] = added_experiences - 1
print("Loaded {:d} experiences from {:s}".format(added_experiences, bufferfile_name))
except IOError:
print('Could not load: {:s}'.format(bufferfile_name))
def all_fresh(self):
"""Mark all experiences in the buffer as unused for training. """
self._buffer_metadata['fresh_experience_count'] = self._buffer_metadata['last_write_index']
def discard_memory(self):
"""Discard all experiences to start with an empty buffer"""
self._buffer_metadata['last_write_index'] = -1
self._buffer_metadata['unused_experience_idcs'] = set()
def add_experience_meta_data_update_listener(self, experience_meta_data_category, listener):
"""Add an event listener that is called with indices for which the metadata has changed."""
assert experience_meta_data_category in self._buffer['experience_meta_data'], 'no metadata for {:s}'.format(
experience_meta_data_category)
self._meta_data_change_listeners[experience_meta_data_category].append(listener)
def get_report(self):
"""Get a report of the buffer data for a tb summary"""
report = {'experiences': self._buffer_metadata['last_write_index'] + 1}
for exp_data in self._buffer['experience_meta_data']:
x = self._buffer['experience_meta_data'][exp_data][
0:self._buffer_metadata['last_write_index'], 0]
x = x[~np.isnan(x)]
x = x[~np.isinf(x)]
report[exp_data] = x
return report
def __len__(self):
return self._properties['buffer_size']
def _create_meta_data_change_listeners(self):
return {name: [] for name in self._buffer['experience_meta_data']}
def _call_meta_data_change_listeners(self, category, indices, pre=False):
for callback_function in self._meta_data_change_listeners[category]:
callback_function(indices, pre)
@property
def fresh_experiences(self):
"""The number of experiences not yet trained with (keeping in mind batch size and reuse)"""
return self._buffer_metadata['fresh_experience_count']
@property
def last_episode_mean_return(self):
"""Returns the mean return over the states visited in the last episode.
This function can only be called between episodes; after an experience has been added
with terminal = True, but before the first experience of the next episode is added.
Returns: The mean return over the states visited in the last episode
Throws: assertion error when an episode has not just finished
"""
assert self._experience_and_episode_metadata['current_episode_finished'], \
'last_episode_mean_return can only be called after an episode has just terminated; ' \
'after ' \
'an experience has been added with terminal = True and before the first experience' \
' of the next episode is added.'
return self._experience_and_episode_metadata['last_episode_mean_return']
@property
def last_episode_initial_state_return(self):
"""Returns the return of the first state visited in the last episode.
This function can only be called between episodes; after an experience has been added
with terminal = True, but before the first experience of the next episode is added.
Returns: The return of the first state visited in the last episode
Throws: assertion error when an episode has not just finished
"""
assert self._experience_and_episode_metadata['current_episode_finished'], \
'last_episode_initial_state_return can only be called after an episode has just ' \
'terminated; after ' \
'an experience has been added with terminal = True and before the first experience' \
' of the next episode is added.'
return self._experience_and_episode_metadata['last_episode_initial_return']
def _create_buffer(self):
""" Create the numpy nd-arrays for the experiences and their meta data.
Returns:
A dict of the same structure as 'experience_properties' with the initialized numpy
tensors
"""
exp_prop = self._properties['experience_properties']
# here the s a s' r t experience is saved each time-step because of experience replay
# research.
# More memory efficient would be to save s a r t per timestep and ensure timesteps are not
# orphaned (at least 2 subsequent)
assert all(name in exp_prop for name in ['observations', 'action', 'reward'])
exp_prop['observations_post'] = exp_prop['observations']
return self._create_variable_buffer(exp_prop)
def _create_variable_buffer(self, variable_description):
"""Recursively build parts of the experience buffer from the dict definition.
Args:
variable_description: either a signal description dict of the following structure:
{
'shape': <tuple, required, dimensions of signal (e.g. (2,) )>
'dtype': <numpy dtype, required, numpy data type>
'ttype': <tensorflow dtype, required, tensorflow data type>
}
or a (multi level) dict containing signal descriptions as values.
Returns:
numpy nd-array for a signal description, (multi level) dict of numpy arrays for a
(multi level) dict of descriptions
"""
if 'shape' in variable_description and 'dtype' in variable_description:
shape = [self._properties['buffer_size']]
shape.extend(list(variable_description['shape']))
return np.empty(shape=shape, dtype=variable_description['dtype'])
else:
returndict = {}
for var_props in variable_description:
assert isinstance(variable_description[var_props], dict), 'bad experience replay ' \
'settings'
returndict[var_props] = self._create_variable_buffer(
variable_description[var_props])
return returndict
@staticmethod
def _create_buffer_metadata():
"""Create a dict with metadata specific to the operation of the buffer.
Returns: the metadatadict
"""
metadata_dict = {
'last_write_index': -1,
'fresh_experience_count': 0,
'unused_experience_idcs': set(),
}
return metadata_dict
def _create_experience_and_episode_metadata(self):
"""Create a dict with metadata specific to experiences and episodes.
Returns: the metadatadict
"""
self.Seq_ep_rew = namedtuple('rewardseq', ['reward', 'buffer_index'])
metadata_dict = {
'experience_episodes': np.zeros(self._properties['buffer_size'], dtype=np.int32),
'experience_returns': np.zeros(self._properties['buffer_size'], dtype=np.float32),
'last_episode_mean_return': None,
'last_episode_initial_return': None,
'last_episode_rewards': {'episode': 0, 'rewards': []},
'current_episode_index': 0,
'current_episode_finished': False
}
return metadata_dict
def _create_placeholders(self):
"""Create the internal set of tensorflow placeholders to feed experiences to."""
prop = self._properties['experience_properties']
with tf.variable_scope('placeholders'):
return {
'observations': self._create_placeholder_set(prop['observations'], timestep=0),
'observations_post': self._create_placeholder_set(
prop['observations'], timestep=1),
'action': self._create_placeholder_set(prop['action'], timestep=0, name='action'),
'reward': self._create_placeholder_set(prop['reward'], timestep=1, name='reward'),
'terminal': self._create_placeholder_set(prop['terminal'], timestep=1,
name='terminal')
}
def _create_placeholder_set(self, param, **kwargs):
"""Recursively create a (dict of) tf placeholders from a (dict of) signal description(s).
Args:
param: a (dict of) signal description(s) (see init)
Returns: a (dict of) placeholders with the specified type and shape (+ -1 batch dimension)
"""
if 'shape' in param:
shape = [None]
shape.extend(list(param['shape']))
full_name = '{:s}_time_{:d}'.format(kwargs['name'], kwargs['timestep'])
return tf.placeholder(shape=shape, dtype=param['ttype'], name=full_name)
else:
return {name: self._create_placeholder_set(param[name], name=name, **kwargs) for name in
param}
def _create_overwrite_policy(self):
"""Init the overwrite policy which determines the next buffer index to be (over)written to.
Returns: The overwrite policy object
"""
policy_prop = self._properties['buffer_properties']['overwrite policy']
if policy_prop['type'] == 'FIFO':
return FifoOverwritePolicy(self)
elif policy_prop['type'] == 'rank based stochastic':
return StochasticRankBasedOverwritePolicy(
experience_buffer=self,
metric=policy_prop['metric'],
highest_values_highest_priority=policy_prop['proportional'],
alpha=policy_prop['alpha']
)
elif policy_prop['type'] == 'Reservoir':
return ReservoirOverwritePolicy(self)
else:
assert False, 'unknown overwrite policy'
def _create_sample_policy(self):
"""Create the sample policy instance based on the settings dict provided to init.
Returns: the sample policy instance, which determines how to sample from the buffer."""
policy_prop = self._properties['buffer_properties']['sample policy']
if policy_prop['type'] == 'uniform':
return UniformSamplePolicy(self)
elif policy_prop['type'] == 'rank based stochastic':
return RankBasedPrioritizedSamplePolicy(
self, metric=policy_prop['metric'],
highest_values_highest_priority=policy_prop['proportional'],
alpha=policy_prop['alpha'])
else:
assert False, 'unknown sample policy'
def _feed_data(self, feed_dict, exp_cmp, indcs, place_holders, | |
* m.b114) +
5.56331545204103)**2 + (-m.x715 / (0.0001 + 0.9999 * m.b114) +
1.14993711831163)**2 + (-m.x716 / (0.0001 + 0.9999 * m.b114) +
4.5715891663462)**2 - 1) * (0.0001 + 0.9999 * m.b114) + 0.00842593616666874
* m.b114 <= 0.00842593616666874)
# Auto-generated model section. Each constraint m.eNNN appears to encode a
# 4-dimensional "point inside unit ball around a fixed center" test that is
# only enforced when its Boolean indicator m.bNNN is selected; the
# (0.0001 + 0.9999 * m.bNNN) factor keeps the division well defined when the
# indicator is 0 — presumably a convex-hull/big-M style disjunction
# reformulation (generated code; confirm against the model generator).
m.e197 = Constraint(expr= ((-m.x717 / (0.0001 + 0.9999 * m.b115) +
    3.33190103022031)**2 + (-m.x718 / (0.0001 + 0.9999 * m.b115) +
    4.9445883792678)**2 + (-m.x719 / (0.0001 + 0.9999 * m.b115) +
    7.30728727625694)**2 + (-m.x720 / (0.0001 + 0.9999 * m.b115) +
    8.01246235442081)**2 - 1) * (0.0001 + 0.9999 * m.b115) + 0.0152146519034331
    * m.b115 <= 0.0152146519034331)
m.e198 = Constraint(expr= ((-m.x721 / (0.0001 + 0.9999 * m.b116) +
    9.33298244503801)**2 + (-m.x722 / (0.0001 + 0.9999 * m.b116) +
    0.723851580512842)**2 + (-m.x723 / (0.0001 + 0.9999 * m.b116) +
    8.42864317892565)**2 + (-m.x724 / (0.0001 + 0.9999 * m.b116) +
    4.32119007374061)**2 - 1) * (0.0001 + 0.9999 * m.b116) + 0.0176343231921043
    * m.b116 <= 0.0176343231921043)
m.e199 = Constraint(expr= ((-m.x725 / (0.0001 + 0.9999 * m.b117) +
    5.08063287228698)**2 + (-m.x726 / (0.0001 + 0.9999 * m.b117) +
    8.38761519865226)**2 + (-m.x727 / (0.0001 + 0.9999 * m.b117) +
    1.4027356086197)**2 + (-m.x728 / (0.0001 + 0.9999 * m.b117) +
    6.86412480592018)**2 - 1) * (0.0001 + 0.9999 * m.b117) + 0.0144248795642564
    * m.b117 <= 0.0144248795642564)
m.e200 = Constraint(expr= ((-m.x729 / (0.0001 + 0.9999 * m.b118) +
    2.37234068047016)**2 + (-m.x730 / (0.0001 + 0.9999 * m.b118) +
    7.05084260559812)**2 + (-m.x731 / (0.0001 + 0.9999 * m.b118) +
    9.48571415448197)**2 + (-m.x732 / (0.0001 + 0.9999 * m.b118) +
    5.77659906719162)**2 - 1) * (0.0001 + 0.9999 * m.b118) + 0.017769025155675
    * m.b118 <= 0.017769025155675)
m.e201 = Constraint(expr= ((-m.x733 / (0.0001 + 0.9999 * m.b119) +
    4.16198173364841)**2 + (-m.x734 / (0.0001 + 0.9999 * m.b119) +
    5.45114144772148)**2 + (-m.x735 / (0.0001 + 0.9999 * m.b119) +
    9.00182905163397)**2 + (-m.x736 / (0.0001 + 0.9999 * m.b119) +
    3.4826499770368)**2 - 1) * (0.0001 + 0.9999 * m.b119) + 0.0139198812171686
    * m.b119 <= 0.0139198812171686)
m.e202 = Constraint(expr= ((-m.x737 / (0.0001 + 0.9999 * m.b120) +
    4.45933786757702)**2 + (-m.x738 / (0.0001 + 0.9999 * m.b120) +
    4.47805189258463)**2 + (-m.x739 / (0.0001 + 0.9999 * m.b120) +
    6.61692822015399)**2 + (-m.x740 / (0.0001 + 0.9999 * m.b120) +
    5.6343120215581)**2 - 1) * (0.0001 + 0.9999 * m.b120) + 0.0114467853996832
    * m.b120 <= 0.0114467853996832)
# exactly one of the indicators b81..b120 may be selected
m.e203 = Constraint(expr= m.b81 + m.b82 + m.b83 + m.b84 + m.b85 + m.b86 + m.b87
    + m.b88 + m.b89 + m.b90 + m.b91 + m.b92 + m.b93 + m.b94 + m.b95 + m.b96 +
    m.b97 + m.b98 + m.b99 + m.b100 + m.b101 + m.b102 + m.b103 + m.b104 + m.b105
    + m.b106 + m.b107 + m.b108 + m.b109 + m.b110 + m.b111 + m.b112 + m.b113 +
    m.b114 + m.b115 + m.b116 + m.b117 + m.b118 + m.b119 + m.b120 == 1)
m.e204 = Constraint(expr= ((-m.x741 / (0.0001 + 0.9999 * m.b121) +
    4.04180710023322)**2 + (-m.x742 / (0.0001 + 0.9999 * m.b121) +
    0.0638120906615358)**2 + (-m.x743 / (0.0001 + 0.9999 * m.b121) +
    9.31163964055327)**2 + (-m.x744 / (0.0001 + 0.9999 * m.b121) +
    9.59399362610548)**2 - 1) * (0.0001 + 0.9999 * m.b121) + 0.0194091623111686
    * m.b121 <= 0.0194091623111686)
m.e205 = Constraint(expr= ((-m.x745 / (0.0001 + 0.9999 * m.b122) +
    7.58630473662528)**2 + (-m.x746 / (0.0001 + 0.9999 * m.b122) +
    9.81696808234314)**2 + (-m.x747 / (0.0001 + 0.9999 * m.b122) +
    6.80594062551012)**2 + (-m.x748 / (0.0001 + 0.9999 * m.b122) +
    5.73941560922778)**2 - 1) * (0.0001 + 0.9999 * m.b122) + 0.0232186601220104
    * m.b122 <= 0.0232186601220104)
m.e206 = Constraint(expr= ((-m.x749 / (0.0001 + 0.9999 * m.b123) +
    4.73576208695481)**2 + (-m.x750 / (0.0001 + 0.9999 * m.b123) +
    2.81737915136856)**2 + (-m.x751 / (0.0001 + 0.9999 * m.b123) +
    0.919919756378161)**2 + (-m.x752 / (0.0001 + 0.9999 * m.b123) +
    0.427396562561213)**2 - 1) * (0.0001 + 0.9999 * m.b123) +
    0.00303939880066688 * m.b123 <= 0.00303939880066688)
m.e207 = Constraint(expr= ((-m.x753 / (0.0001 + 0.9999 * m.b124) +
    0.428853030190813)**2 + (-m.x754 / (0.0001 + 0.9999 * m.b124) +
    5.71294529671424)**2 + (-m.x755 / (0.0001 + 0.9999 * m.b124) +
    2.06079707847737)**2 + (-m.x756 / (0.0001 + 0.9999 * m.b124) +
    3.87755584734058)**2 - 1) * (0.0001 + 0.9999 * m.b124) +
    0.00511039828326592 * m.b124 <= 0.00511039828326592)
m.e208 = Constraint(expr= ((-m.x757 / (0.0001 + 0.9999 * m.b125) +
    1.0774677742481)**2 + (-m.x758 / (0.0001 + 0.9999 * m.b125) +
    0.802343324640142)**2 + (-m.x759 / (0.0001 + 0.9999 * m.b125) +
    5.05560926630768)**2 + (-m.x760 / (0.0001 + 0.9999 * m.b125) +
    6.38583388950109)**2 - 1) * (0.0001 + 0.9999 * m.b125) +
    0.00671427511330145 * m.b125 <= 0.00671427511330145)
m.e209 = Constraint(expr= ((-m.x761 / (0.0001 + 0.9999 * m.b126) +
    1.12638621393495)**2 + (-m.x762 / (0.0001 + 0.9999 * m.b126) +
    1.60465763780203)**2 + (-m.x763 / (0.0001 + 0.9999 * m.b126) +
    4.07986801140447)**2 + (-m.x764 / (0.0001 + 0.9999 * m.b126) +
    2.17037731980447)**2 - 1) * (0.0001 + 0.9999 * m.b126) +
    0.00241995327383022 * m.b126 <= 0.00241995327383022)
m.e210 = Constraint(expr= ((-m.x765 / (0.0001 + 0.9999 * m.b127) +
    1.82688019388165)**2 + (-m.x766 / (0.0001 + 0.9999 * m.b127) +
    4.63411239877442)**2 + (-m.x767 / (0.0001 + 0.9999 * m.b127) +
    5.05145431164509)**2 + (-m.x768 / (0.0001 + 0.9999 * m.b127) +
    3.74855986966667)**2 - 1) * (0.0001 + 0.9999 * m.b127) +
    0.00633813807263851 * m.b127 <= 0.00633813807263851)
m.e211 = Constraint(expr= ((-m.x769 / (0.0001 + 0.9999 * m.b128) +
    8.44017389324234)**2 + (-m.x770 / (0.0001 + 0.9999 * m.b128) +
    3.82403068466693)**2 + (-m.x771 / (0.0001 + 0.9999 * m.b128) +
    8.92353737043929)**2 + (-m.x772 / (0.0001 + 0.9999 * m.b128) +
    4.64273943517454)**2 - 1) * (0.0001 + 0.9999 * m.b128) + 0.0186044294689995
    * m.b128 <= 0.0186044294689995)
m.e212 = Constraint(expr= ((-m.x773 / (0.0001 + 0.9999 * m.b129) +
    5.87176386853829)**2 + (-m.x774 / (0.0001 + 0.9999 * m.b129) +
    1.44175632208916)**2 + (-m.x775 / (0.0001 + 0.9999 * m.b129) +
    3.18165222878213)**2 + (-m.x776 / (0.0001 + 0.9999 * m.b129) +
    5.19789554578514)**2 - 1) * (0.0001 + 0.9999 * m.b129) +
    0.00726973012299631 * m.b129 <= 0.00726973012299631)
m.e213 = Constraint(expr= ((-m.x777 / (0.0001 + 0.9999 * m.b130) +
    6.76013297629122)**2 + (-m.x778 / (0.0001 + 0.9999 * m.b130) +
    3.21254997009641)**2 + (-m.x779 / (0.0001 + 0.9999 * m.b130) +
    9.46706727409796)**2 + (-m.x780 / (0.0001 + 0.9999 * m.b130) +
    0.816604516799142)**2 - 1) * (0.0001 + 0.9999 * m.b130) + 0.014531208087666
    * m.b130 <= 0.014531208087666)
m.e214 = Constraint(expr= ((-m.x781 / (0.0001 + 0.9999 * m.b131) +
    3.24901644799959)**2 + (-m.x782 / (0.0001 + 0.9999 * m.b131) +
    7.51480516788174)**2 + (-m.x783 / (0.0001 + 0.9999 * m.b131) +
    4.01508707364147)**2 + (-m.x784 / (0.0001 + 0.9999 * m.b131) +
    4.50822439140953)**2 - 1) * (0.0001 + 0.9999 * m.b131) + 0.0102473415962817
    * m.b131 <= 0.0102473415962817)
m.e215 = Constraint(expr= ((-m.x785 / (0.0001 + 0.9999 * m.b132) +
0.388900076234485)**2 + (-m.x786 / (0.0001 + 0.9999 * m.b132) +
5.87163371055414)**2 + (-m.x787 / (0.0001 + 0.9999 * m.b132) +
3.83580726436302)**2 + (-m.x788 / (0.0001 + 0.9999 * m.b132) +
3.02326353201313)**2 - 1) * (0.0001 + 0.9999 * m.b132) +
0.00574808654535516 * m.b132 <= 0.00574808654535516)
m.e216 = Constraint(expr= ((-m.x789 / (0.0001 + 0.9999 * m.b133) +
0.790446136347066)**2 + (-m.x790 / (0.0001 + 0.9999 * m.b133) +
8.25540861746511)**2 + (-m.x791 / (0.0001 + 0.9999 * m.b133) +
8.76325441282356)**2 + (-m.x792 / (0.0001 + 0.9999 * m.b133) +
3.16275732090416)**2 - 1) * (0.0001 + 0.9999 * m.b133) + 0.0154574238310588
* m.b133 <= 0.0154574238310588)
m.e217 = Constraint(expr= ((-m.x793 / (0.0001 + 0.9999 * m.b134) +
2.24835841067941)**2 + (-m.x794 / (0.0001 + 0.9999 * m.b134) +
1.49328376014994)**2 + (-m.x795 / (0.0001 + 0.9999 * m.b134) +
6.05269717076173)**2 + (-m.x796 / (0.0001 + 0.9999 * m.b134) +
9.30035396512944)**2 - 1) * (0.0001 + 0.9999 * m.b134) + 0.0129416738848846
* m.b134 <= 0.0129416738848846)
m.e218 = Constraint(expr= ((-m.x797 / (0.0001 + 0.9999 * m.b135) +
9.00506853532299)**2 + (-m.x798 / (0.0001 + 0.9999 * m.b135) +
4.84258207392989)**2 + (-m.x799 / (0.0001 + 0.9999 * m.b135) +
5.30242448190069)**2 + (-m.x800 / (0.0001 + 0.9999 * m.b135) +
1.96947429895047)**2 - 1) * (0.0001 + 0.9999 * m.b135) + 0.0135536394869098
* m.b135 <= 0.0135536394869098)
m.e219 = Constraint(expr= ((-m.x801 / | |
= easyprof.parse_args(args)
easyp = easyprof.AppArmorEasyProfile(None, self.options)
orig_templates = easyp.get_templates()
args = self.full_args
args.append('--list-templates')
args.append('--include-templates-dir=%s' %
os.path.join(self.test_include_dir, 'templates'))
(self.options, self.args) = easyprof.parse_args(args)
easyp = easyprof.AppArmorEasyProfile(None, self.options)
inc_templates = easyp.get_templates()
self.assertTrue(len(inc_templates) == len(orig_templates) * 2,
"templates missing: %s" % inc_templates)
for i in inc_templates:
self.assertTrue(os.path.exists(i), "Could not find '%s'" % i)
def test_templates_show_include(self):
    '''Test templates (show with --include-templates-dir)'''
    # Every template copied into the include directory must be showable.
    for f in glob.glob("%s/templates/*" % self.test_include_dir):
        args = self.full_args
        args += ['--show-template',
                 '--template', f,
                 '--include-templates-dir=%s' %
                 os.path.join(self.test_include_dir, 'templates')]
        (self.options, self.args) = easyprof.parse_args(args)
        easyp = easyprof.AppArmorEasyProfile(None, self.options)
        # NOTE(review): 'f' is an absolute path, so os.path.join() returns
        # it unchanged here -- verify this lookup is what was intended.
        path = os.path.join(easyp.dirs['templates_include'], f)
        self.assertTrue(os.path.exists(path), "Could not find '%s'" % path)
        # Prove the file is readable; use a context manager so the handle
        # is closed promptly (the original open(path).read() leaked it).
        with open(path) as handle:
            handle.read()
        bn = os.path.basename(f)
        # setup() copies everything in the include prefixed with inc_
        self.assertTrue(bn.startswith('inc_'),
                        "'%s' does not start with 'inc_'" % bn)
#
# Policygroups tests
#
def test_policygroups_list(self):
    '''Test policygroups (list)'''
    args = self.full_args
    args.append('--list-policy-groups')
    self.options, self.args = easyprof.parse_args(args)
    easyp = easyprof.AppArmorEasyProfile(None, self.options)
    # Every listed policy group must exist on disk.
    for group in easyp.get_policy_groups():
        self.assertTrue(os.path.exists(group), "Could not find '%s'" % group)
def test_policygroups_show(self):
    '''Test policygroups (show)'''
    # Each shipped policy group must be showable by basename.
    for f in glob.glob("%s/policygroups/*" % self.tmpdir):
        args = self.full_args
        args += ['--show-policy-group',
                 '--policy-groups', os.path.basename(f)]
        (self.options, self.args) = easyprof.parse_args(args)
        easyp = easyprof.AppArmorEasyProfile(None, self.options)
        # NOTE(review): 'f' is already an absolute path, so os.path.join()
        # returns it unchanged -- confirm the basename was not intended.
        path = os.path.join(easyp.dirs['policygroups'], f)
        self.assertTrue(os.path.exists(path), "Could not find '%s'" % path)
        # Prove the file is readable; close the handle deterministically
        # (the original open(path).read() leaked it).
        with open(path) as handle:
            handle.read()
def test_policygroups_list_include(self):
    '''Test policygroups (list with --include-policy-groups-dir)'''
    # First pass: list the policy groups from the default directory only.
    args = self.full_args
    args.append('--list-policy-groups')
    (self.options, self.args) = easyprof.parse_args(args)
    easyp = easyprof.AppArmorEasyProfile(None, self.options)
    orig_policy_groups = easyp.get_policy_groups()
    # NOTE(review): 'args = self.full_args' aliases (does not copy) the
    # shared list, so the append above is still present when the args are
    # rebuilt below -- confirm setUp() recreates full_args for each test.
    args = self.full_args
    args.append('--list-policy-groups')
    args.append('--include-policy-groups-dir=%s' %
                os.path.join(self.test_include_dir, 'policygroups'))
    (self.options, self.args) = easyprof.parse_args(args)
    easyp = easyprof.AppArmorEasyProfile(None, self.options)
    inc_policy_groups = easyp.get_policy_groups()
    # With the include dir the count should double (one copy per directory).
    self.assertTrue(len(inc_policy_groups) == len(orig_policy_groups) * 2,
                    "policy_groups missing: %s" % inc_policy_groups)
    for i in inc_policy_groups:
        self.assertTrue(os.path.exists(i), "Could not find '%s'" % i)
def test_policygroups_show_include(self):
    '''Test policygroups (show with --include-policy-groups-dir)'''
    # Every policy group copied into the include directory must be showable.
    for f in glob.glob("%s/policygroups/*" % self.test_include_dir):
        args = self.full_args
        args += ['--show-policy-group',
                 '--policy-groups', os.path.basename(f),
                 '--include-policy-groups-dir=%s' %
                 os.path.join(self.test_include_dir, 'policygroups')]
        (self.options, self.args) = easyprof.parse_args(args)
        easyp = easyprof.AppArmorEasyProfile(None, self.options)
        # NOTE(review): 'f' is an absolute path, so os.path.join() returns
        # it unchanged -- verify this lookup is what was intended.
        path = os.path.join(easyp.dirs['policygroups_include'], f)
        self.assertTrue(os.path.exists(path), "Could not find '%s'" % path)
        # Prove the file is readable; close the handle deterministically
        # (the original open(path).read() leaked it).
        with open(path) as handle:
            handle.read()
        bn = os.path.basename(f)
        # setup() copies everything in the include prefixed with inc_
        self.assertTrue(bn.startswith('inc_'),
                        "'%s' does not start with 'inc_'" % bn)
#
# Manifest file argument tests
#
def test_manifest_argument(self):
    '''Test manifest argument'''
    # setup our manifest
    self.manifest = os.path.join(self.tmpdir, 'manifest.json')
    contents = '''
{"security": {"domain.reverse.appname": {"name": "simple-app"}}}
'''
    # Use a context manager so the handle is flushed and closed before
    # parse_args() reads the file (the original open(...).write() relied
    # on garbage collection to close it).
    with open(self.manifest, 'w') as handle:
        handle.write(contents)

    args = self.full_args
    args.extend(['--manifest', self.manifest])
    easyprof.parse_args(args)
def _manifest_conflicts(self, opt, value):
    '''Helper for conflicts tests.

    Verifies that combining 'opt value' with --manifest raises a parse
    error, regardless of which argument comes first.
    '''
    # setup our manifest
    self.manifest = os.path.join(self.tmpdir, 'manifest.json')
    # BUG FIX: the original literal was missing the opening quote before
    # /nonexistent, which made the manifest invalid JSON.
    contents = '''
{"security": {"domain.reverse.appname": {"binary": "/nonexistent"}}}
'''
    # Close the handle deterministically instead of leaking it.
    with open(self.manifest, 'w') as handle:
        handle.write(contents)

    # The conflict must be detected in both argument orders. Note that
    # args.extend() deliberately mutates the shared self.full_args list,
    # matching the original behaviour.
    for extra in ([opt, value, '--manifest', self.manifest],
                  ['--manifest', self.manifest, opt, value]):
        args = self.full_args
        args.extend(extra)
        raised = False
        try:
            easyprof.parse_args(args, InterceptingOptionParser())
        except InterceptedError:
            raised = True
        self.assertTrue(raised, msg="%s and manifest arguments did not " \
                        "raise a parse error" % opt)
# Each test below feeds one command-line option that also carries policy
# information to _manifest_conflicts(); combining it with --manifest must be
# rejected by the option parser.

def test_manifest_conflicts_profilename(self):
    '''Test manifest arg conflicts with profile_name arg'''
    self._manifest_conflicts("--profile-name", "simple-app")

def test_manifest_conflicts_copyright(self):
    '''Test manifest arg conflicts with copyright arg'''
    self._manifest_conflicts("--copyright", "2013-01-01")

def test_manifest_conflicts_author(self):
    '''Test manifest arg conflicts with author arg'''
    self._manifest_conflicts("--author", "Foo Bar")

def test_manifest_conflicts_comment(self):
    '''Test manifest arg conflicts with comment arg'''
    self._manifest_conflicts("--comment", "some comment")

def test_manifest_conflicts_abstractions(self):
    '''Test manifest arg conflicts with abstractions arg'''
    self._manifest_conflicts("--abstractions", "base")

def test_manifest_conflicts_read_path(self):
    '''Test manifest arg conflicts with read-path arg'''
    self._manifest_conflicts("--read-path", "/etc/passwd")

def test_manifest_conflicts_write_path(self):
    '''Test manifest arg conflicts with write-path arg'''
    self._manifest_conflicts("--write-path", "/tmp/foo")

def test_manifest_conflicts_policy_groups(self):
    '''Test manifest arg conflicts with policy-groups arg'''
    self._manifest_conflicts("--policy-groups", "opt-application")

def test_manifest_conflicts_name(self):
    '''Test manifest arg conflicts with name arg'''
    self._manifest_conflicts("--name", "foo")

def test_manifest_conflicts_template_var(self):
    '''Test manifest arg conflicts with template-var arg'''
    self._manifest_conflicts("--template-var", "foo")

def test_manifest_conflicts_policy_version(self):
    '''Test manifest arg conflicts with policy-version arg'''
    self._manifest_conflicts("--policy-version", "1.0")

def test_manifest_conflicts_policy_vendor(self):
    '''Test manifest arg conflicts with policy-vendor arg'''
    self._manifest_conflicts("--policy-vendor", "somevendor")
#
# Test genpolicy
#
def _gen_policy(self, name=None, template=None, extra_args=None):
    '''Generate a policy and verify the expected strings appear in it.

    name       -- value for --name, if any
    template   -- template to use (defaults to self.test_template)
    extra_args -- additional command-line arguments (BUG FIX: was a
                  mutable default list, which is shared across calls)

    Returns the generated policy text.
    '''
    # Build up our args
    args = self.full_args
    if template is None:
        args.append('--template=%s' % self.test_template)
    else:
        args.append('--template=%s' % template)
    if name is not None:
        args.append('--name=%s' % name)
    if extra_args:
        args += extra_args
    args.append(self.binary)

    # Now parse our args
    (self.options, self.args) = easyprof.parse_args(args)
    easyp = easyprof.AppArmorEasyProfile(self.binary, self.options)
    params = easyprof.gen_policy_params(self.binary, self.options)
    p = easyp.gen_policy(**params)

    # We always need to check for these
    search_terms = [self.binary]
    if name is not None:
        search_terms.append(name)
    if template is None:
        search_terms.append(self.test_template)

    for s in search_terms:
        self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))

    # ###NAME### should be replaced with self.binary or 'name'. Check for that
    inv_s = '###NAME###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

    if debugging:
        sys.stdout.write("%s\n" % p)

    return p
def _gen_manifest_policy(self, manifest, use_security_prefix=True):
    '''Generate a policy from a manifest object and return its text.'''
    # Build up our args
    args = self.full_args
    # --manifest is required by parse_args(); the real manifest JSON is
    # injected directly via parse_manifest() below.
    args.append("--manifest=/dev/null")
    (self.options, self.args) = easyprof.parse_args(args)
    # parse_manifest() returns a list of (binary, options); only the first
    # entry is used here.
    (binary, self.options) = easyprof.parse_manifest(manifest.emit_json(use_security_prefix), self.options)[0]
    easyp = easyprof.AppArmorEasyProfile(binary, self.options)
    params = easyprof.gen_policy_params(binary, self.options)
    p = easyp.gen_policy(**params)
    # ###NAME### should be replaced with self.binary or 'name'. Check for that
    inv_s = '###NAME###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))
    if debugging:
        sys.stdout.write("%s\n" % p)
    return p
def test__is_safe(self):
    '''Test _is_safe()'''
    # Strings with traversal, whitespace, separators, NUL bytes or quotes
    # must all be rejected.
    bad = [
        "/../../../../etc/passwd",
        "abstraction with spaces",
        "semicolon;bad",
        "bad\x00baz",
        "foo/bar",
        "foo'bar",
        'foo"bar',
    ]
    for candidate in bad:
        self.assertFalse(easyprof._is_safe(candidate),
                         "'%s' should be bad" % candidate)
def test_genpolicy_templates_abspath(self):
    '''Test genpolicy (abspath to template)'''
    # create a new template outside the system templates directory
    template = os.path.join(self.tmpdir, "test-abspath-template")
    shutil.copy(os.path.join(self.tmpdir, 'templates', self.test_template), template)
    # Append a marker we can search for. Use context managers so both
    # handles are closed deterministically (the original open().read()
    # and open().write() leaked them).
    with open(template) as handle:
        contents = handle.read()
    test_string = "#teststring"
    with open(template, 'w') as handle:
        handle.write(contents + "\n%s\n" % test_string)
    p = self._gen_policy(template=template)
    for s in [self.test_template, test_string]:
        self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
def test_genpolicy_templates_system(self):
    '''Test genpolicy (system template)'''
    # _gen_policy() defaults to self.test_template, i.e. a system template.
    self._gen_policy()
def test_genpolicy_templates_nonexistent(self):
    '''Test genpolicy (nonexistent template)'''
    # os.path.join() with an absolute second component yields "/nonexistent".
    # AppArmorException is the expected outcome; anything else (including no
    # exception) is a test failure.
    try:
        self._gen_policy(template=os.path.join(self.tmpdir, "/nonexistent"))
    except easyprof.AppArmorException:
        return
    raise Exception ("template should be invalid")
# Each test below checks one metadata option: its value must appear in the
# generated policy and the corresponding ###PLACEHOLDER### must be gone.

def test_genpolicy_name(self):
    '''Test genpolicy (name)'''
    self._gen_policy(name='test-foo')

def test_genpolicy_comment(self):
    '''Test genpolicy (comment)'''
    s = "test comment"
    p = self._gen_policy(extra_args=['--comment=%s' % s])
    self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
    inv_s = '###COMMENT###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

def test_genpolicy_author(self):
    '''Test genpolicy (author)'''
    s = "<NAME>"
    p = self._gen_policy(extra_args=['--author=%s' % s])
    self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
    inv_s = '###AUTHOR###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

def test_genpolicy_copyright(self):
    '''Test genpolicy (copyright)'''
    s = "2112/01/01"
    p = self._gen_policy(extra_args=['--copyright=%s' % s])
    self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
    inv_s = '###COPYRIGHT###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))
def test_genpolicy_abstractions(self):
    '''Test genpolicy (single abstraction)'''
    s = "nameservice"
    p = self._gen_policy(extra_args=['--abstractions=%s' % s])
    # Abstractions are emitted as #include lines in the policy.
    search = "#include <abstractions/%s>" % s
    self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
    inv_s = '###ABSTRACTIONS###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))

def test_genpolicy_abstractions_multiple(self):
    '''Test genpolicy (multiple abstractions)'''
    # --abstractions takes a comma-separated list; each entry must get
    # its own #include line.
    abstractions = "authentication,X,user-tmp"
    p = self._gen_policy(extra_args=['--abstractions=%s' % abstractions])
    for s in abstractions.split(','):
        search = "#include <abstractions/%s>" % s
        self.assertTrue(search in p, "Could not find '%s' in:\n%s" % (search, p))
    inv_s = '###ABSTRACTIONS###'
    self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % (inv_s, p))
# The three tests below follow the same pattern: every bad value must make
# the generator raise AppArmorException; any other exception propagates and
# a missing exception fails the test.

def test_genpolicy_abstractions_bad(self):
    '''Test genpolicy (abstractions - bad values)'''
    bad = [
        "nonexistent",
        "/../../../../etc/passwd",
        "abstraction with spaces",
    ]
    for s in bad:
        try:
            self._gen_policy(extra_args=['--abstractions=%s' % s])
        except easyprof.AppArmorException:
            continue
        except Exception:
            raise
        raise Exception ("abstraction '%s' should be invalid" % s)

def test_genpolicy_profile_name_bad(self):
    '''Test genpolicy (profile name - bad values)'''
    bad = [
        "/../../../../etc/passwd",
        "../../../../etc/passwd",
        "profile name with spaces",
    ]
    for s in bad:
        try:
            self._gen_policy(extra_args=['--profile-name=%s' % s])
        except easyprof.AppArmorException:
            continue
        except Exception:
            raise
        raise Exception ("profile_name '%s' should be invalid" % s)

def test_genpolicy_policy_group_bad(self):
    '''Test genpolicy (policy group - bad values)'''
    # NOTE(review): this list was copied from the profile-name test and
    # still says "profile name with spaces" -- confirm the value is the
    # one intended for policy groups.
    bad = [
        "/../../../../etc/passwd",
        "../../../../etc/passwd",
        "profile name with spaces",
    ]
    for s in bad:
        try:
            self._gen_policy(extra_args=['--policy-groups=%s' % s])
        except easyprof.AppArmorException:
            continue
        except Exception:
            raise
        raise Exception ("policy group '%s' should be invalid" % s)
def test_genpolicy_policygroups(self):
'''Test genpolicy (single policygroup)'''
groups = self.test_policygroup
p = self._gen_policy(extra_args=['--policy-groups=%s' % groups])
for s in ['#include <abstractions/nameservice>', '#include <abstractions/gnome>']:
self.assertTrue(s in p, "Could not find '%s' in:\n%s" % (s, p))
inv_s = '###POLICYGROUPS###'
self.assertFalse(inv_s in p, "Found '%s' in :\n%s" % | |
<filename>venv/Lib/site-packages/ObjectListView/OLVPrinter.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#----------------------------------------------------------------------------
# Name: OLVPrinter.py
# Author: <NAME>
# Created: 17 July 2008
# Copyright: (c) 2008 by <NAME>, 2008
# License: wxWindows license
#----------------------------------------------------------------------------
# Change log:
# 2008/07/17 JPP Initial version
#----------------------------------------------------------------------------
# To do:
"""
An OLVPrinter takes an ObjectListView and turns it into a pretty report.
As always, the goal is for this to be as easy to use as possible. A typical
usage should be as simple as::
printer = OLVPrinter(self.myOlv, "My Report Title")
printer.PrintPreview()
"""
import wx
from ObjectListView import GroupListView
from WordWrapRenderer import WordWrapRenderer
#======================================================================
class OLVPrinter(wx.Printout):

    """
    An OLVPrinter creates a pretty report from an ObjectListView.
    """

    def __init__(self, objectListView=None, title="ObjectListView Printing"):
        """
        Create a printout, optionally registering one ObjectListView with it.
        """
        wx.Printout.__init__(self, title)
        # The engine does all the real layout and pagination work.
        self.engine = ReportEngine()
        self.printData = wx.PrintData()
        self.printData.SetPaperId(wx.PAPER_A4)
        self.printData.SetPrintMode(wx.PRINT_MODE_PRINTER)

        if objectListView is not None:
            self.engine.AddListCtrl(objectListView, title)

    #-------------------------------------------------------------------------
    # Accessing

    def HasPage(self, page):
        """
        Return True if the given (1-based) page is part of the report.
        """
        return page <= self.engine.GetTotalPages()

    def GetPageInfo(self):
        """
        Return (minPage, maxPage, pageFrom, pageTo) as wx.Printout requires.
        """
        return (1, self.engine.GetTotalPages(), 1, 1)

    def GetReportFormat(self):
        """
        Return the ReportFormat object that controls the appearance of this printout
        """
        return self.engine.reportFormat

    def SetReportFormat(self, fmt):
        """
        Set the ReportFormat object that controls the appearance of this printout
        """
        self.engine.reportFormat = fmt

    ReportFormat = property(GetReportFormat, SetReportFormat)

    #-------------------------------------------------------------------------
    # Commands

    def PageSetup(self):
        """
        Show a Page Setup dialog that will change the configuration of this printout
        """
        psdd = wx.PageSetupDialogData(self.printData)
        psdd.CalculatePaperSizeFromId()
        # NOTE(review): 'self' is a wx.Printout, not a wx.Window -- confirm
        # that passing it as the dialog parent is intended.
        dlg = wx.PageSetupDialog(self, psdd)
        dlg.ShowModal()

        # this makes a copy of the wx.PrintData instead of just saving
        # a reference to the one inside the PrintDialogData that will
        # be destroyed when the dialog is destroyed
        self.printData = wx.PrintData(dlg.GetPageSetupData().GetPrintData())

        dlg.Destroy()

    def PrintPreview(
            self,
            parent=None,
            title="ObjectListView Print Preview",
            bounds=(
                20,
                50,
                800,
                800)):
        """
        Show a Print Preview of this report
        """
        data = wx.PrintDialogData(self.printData)
        # TODO: Implement some proper way to copy the printer for the
        # preview's "print" button instead of reusing self.
        self.preview = wx.PrintPreview(self, None, data)

        if not self.preview.Ok():
            return False

        pfrm = wx.PreviewFrame(self.preview, parent, title)
        pfrm.Initialize()
        pfrm.SetPosition(bounds[0:2])
        pfrm.SetSize(bounds[2:4])
        pfrm.Show(True)

        return True

    def DoPrint(self, parent=None):
        """
        Send the report to the configured printer
        """
        pdd = wx.PrintDialogData(self.printData)
        printer = wx.Printer(pdd)

        if printer.Print(parent, self, True):
            # Remember any settings the user changed in the print dialog.
            self.printData = wx.PrintData(
                printer.GetPrintDialogData().GetPrintData())
        else:
            wx.MessageBox(
                "There was a problem printing.\nPerhaps your current printer is not set correctly?",
                "Printing",
                wx.OK)
            # BUG FIX: the original code called printout.Destroy() here, but
            # no 'printout' name exists in this scope, so every failed print
            # raised a NameError instead of just reporting the problem.

    #-------------------------------------------------------------------------
    # Event handlers

    def OnPreparePrinting(self):
        """
        Prepare for printing. This event is sent before any of the others
        """
        # Calculating the page count effectively prints the whole report,
        # so restart printing afterwards.
        self.engine.CalculateTotalPages(self.GetDC())
        self.engine.StartPrinting()

    def OnBeginDocument(self, start, end):
        """
        Begin printing one copy of the document. Return False to cancel the job
        """
        return super(OLVPrinter, self).OnBeginDocument(start, end)

    def OnEndDocument(self):
        """
        Finish printing one copy of the document.
        """
        super(OLVPrinter, self).OnEndDocument()

    def OnBeginPrinting(self):
        """
        The print job is starting.
        """
        super(OLVPrinter, self).OnBeginPrinting()

    def OnEndPrinting(self):
        """
        The print job has finished.
        """
        super(OLVPrinter, self).OnEndPrinting()

    def OnPrintPage(self, page):
        """
        Render the given page onto this printout's device context.
        """
        return self.engine.PrintPage(self.GetDC(), page)
#======================================================================
class ReportEngine(object):

    """
    A ReportEngine handles all the work of actually producing a report.

    It owns an ordered list of "blocks" (pieces of the report still waiting
    to be printed, consumed front-first) plus "running blocks" (headers and
    footers repeated on every page), and renders them page by page onto a
    wx device context.
    """

    def __init__(self):
        """
        Create an empty engine with default texts and formatting.
        """
        self.currentPage = -1          # page most recently printed (-1 = none)
        self.totalPages = -1           # filled in by CalculateTotalPages()
        self.blocks = list()           # pending blocks, printed front-first
        self.blockInsertionIndex = 0   # where AddBlock() inserts next
        self.objectListViews = list()  # [objectListView, title] pairs
        self.reportFormat = ReportFormat()

        self.isColumnHeadingsOnEachPage = True
        self.alwaysCenterColumnHeader = True
        self.reportHeaderText = "Report Header Text"
        self.reportFooterText = "Report Footer Text"
        self.pageHeaderText = "This is the header"
        self.pageFooterText = "This is the footer"
        self.isPrintSelectionOnly = False
        self.isShrinkToFit = False
        self.canCellsWrap = True
        self.watermarkText = "WATERMARK"
        self.watermarkFont = None
        self.watermarkColor = None

    #-------------------------------------------------------------------------
    # Accessing

    def GetNamedFormat(self, name):
        """
        Return the given format
        """
        return self.reportFormat.GetNamedFormat(name)

    def GetTotalPages(self):
        """
        Return the total number of pages that this report will produce.

        CalculateTotalPages() must be called before this is accurate.
        """
        return self.totalPages

    #-------------------------------------------------------------------------
    # Calculating

    def CalculateTotalPages(self, dc):
        """
        Do the work of calculating how many pages this report will occupy.

        This is expensive because it basically prints the whole report.
        """
        self.StartPrinting()
        self.totalPages = 1
        # Print (and discard) page after page until nothing is left.
        while self.PrintOnePage(dc, self.totalPages):
            self.totalPages += 1
            dc.Clear()

    def CalculateBounds(self, dc):
        """
        Calculate our page and work bounds
        """
        # pageBounds covers the whole page; workBounds is a mutable copy
        # that shrinks as decorations claim space (ApplyPageDecorations).
        self.pageBounds = (0, 0) + dc.GetSizeTuple()
        self.workBounds = list(self.pageBounds)

    #-------------------------------------------------------------------------
    # Commands

    def AddBlock(self, block):
        """
        Add the given block at the current insertion point
        """
        self.blocks.insert(self.blockInsertionIndex, block)
        self.blockInsertionIndex += 1
        # Blocks use this back-pointer to reach formats and bounds.
        block.engine = self

    def AddListCtrl(self, objectListView, title=None):
        """
        Add the given list to those that will be printed by this report.
        """
        # Only report-view lists have printable columns/rows.
        if objectListView.InReportView():
            self.objectListViews.append([objectListView, title])

    def DropCurrentBlock(self):
        """
        Remove the current block from our list of blocks
        """
        self.blocks.pop(0)
        # NOTE(review): the insertion index is reset to 1 (not 0), so blocks
        # added while printing go after the new current block -- confirm this
        # is the intended behaviour.
        self.blockInsertionIndex = 1

    #-------------------------------------------------------------------------
    # Printing

    def StartPrinting(self):
        """
        Initialise a print job on this engine
        """
        self.currentPage = 0
        self.blockInsertionIndex = 0
        self.blocks = list()
        self.AddBlock(ReportBlock())
        self.runningBlocks = list()
        self.AddRunningBlock(PageHeaderBlock(self))
        self.AddRunningBlock(PageFooterBlock(self))

    def AddRunningBlock(self, block):
        """
        A running block is printed on every page until it is removed
        """
        self.runningBlocks.append(block)
        block.engine = self

    def RemoveRunningBlock(self, block):
        """
        A running block is printed on every page until it is removed
        """
        self.runningBlocks.remove(block)

    def PrintPage(self, dc, pageNumber):
        """
        Print the given page on the given device context.
        """
        # try:
        #    pdc = wx.GCDC(dc)
        # except:
        #    pdc = dc
        pdc = dc

        # If the request page isn't next in order, we have to restart
        # the printing process and advance until we reach the desired page
        if pageNumber != self.currentPage + 1:
            #print("Skipping pages...")
            self.StartPrinting()
            for i in range(1, pageNumber):
                self.PrintOnePage(pdc, i)
                dc.Clear()
            #print("...finished skipping.")
        return self.PrintOnePage(pdc, pageNumber)

    def PrintOnePage(self, dc, pageNumber):
        """
        Print the current page on the given device context.

        Return true if there is still more to print.
        """
        self.currentPage = pageNumber
        self.CalculateBounds(dc)
        self.ApplyPageDecorations(dc)
        # Running blocks (page header/footer) repeat on every page.
        for x in self.runningBlocks:
            x.Print(dc)
        # Print pending blocks until one does not finish on this page
        # (its Print() returns False) or none remain.
        while len(self.blocks) and self.blocks[0].Print(dc):
            self.DropCurrentBlock()
        return len(self.blocks) > 0

    def ApplyPageDecorations(self, dc):
        """
        Draw the page-level decorations and shrink the work area to match.
        """
        fmt = self.GetNamedFormat("Page")

        # Draw the page decorations
        bounds = list(self.pageBounds)
        fmt.DrawDecorations(dc, bounds, self)

        # Subtract the area used from the work area
        self.workBounds = fmt.SubtractDecorations(dc, self.workBounds)
#======================================================================
class ReportFormat(object):

    """
    A ReportFormat defines completely how a report is formatted.

    It holds a collection of BlockFormat objects which control the
    formatting of individual blocks of the report
    """

    def __init__(self):
        """
        Create a format holding one default BlockFormat per report part.
        """
        self.formats = ("Page ReportHeader PageHeader ListHeader GroupTitle "
                        "List ColumnHeader ListRows Row ListFooter "
                        "PageFooter ReportFooter").split()
        for partName in self.formats:
            setattr(self, partName, BlockFormat())

    def GetNamedFormat(self, name):
        """
        Return the BlockFormat registered under the given part name.
        """
        return getattr(self, name)

    @staticmethod
    def Normal(fontName="Arial"):
        """
        Return a reasonable default format for a report
        """
        report = ReportFormat()

        report.ReportHeader.Font = wx.FFont(36, wx.FONTFAMILY_DEFAULT, face=fontName)
        report.ReportHeader.TextColor = wx.RED
        report.ReportHeader.Padding = (0, 12, 0, 12)

        report.PageHeader.Font = wx.FFont(24, wx.FONTFAMILY_DEFAULT, face=fontName)
        report.PageHeader.TextAlignment = wx.ALIGN_CENTRE
        report.PageHeader.Add(FrameDecoration(pen=wx.Pen(wx.BLUE, 1), space=5))

        report.ListHeader.Add(
            LineDecoration(side=Decoration.BOTTOM, pen=wx.Pen(wx.GREEN, 1)))

        report.PageFooter.Font = wx.FFont(12, wx.FONTFAMILY_DEFAULT, face=fontName)
        report.PageFooter.TextAlignment = wx.ALIGN_RIGHT
        report.PageFooter.Add(
            LineDecoration(side=Decoration.TOP, pen=wx.Pen(wx.BLUE, 1), space=3))

        report.ColumnHeader.GridPen = wx.Pen(wx.RED, 1)

        report.Row.Font = wx.FFont(12, wx.FONTFAMILY_DEFAULT, face=fontName)
        report.Row.CellPadding = (10, 10, 0, 10)
        report.Row.GridPen = wx.Pen(wx.BLUE, 1)

        return report
#======================================================================
class BlockFormat(object):
"""
A block format defines how a Block is formatted.
"""
def __init__(self):
    """
    Create a block format with library defaults and no decorations.
    """
    self.padding = None        # outer padding; normalised by SetPadding()
    self.decorations = list()  # decoration objects drawn with this block
    # Default font for text in this block.
    self.font = wx.FFont(14, wx.FONTFAMILY_SWISS, face="Gill Sans")
    self.textColor = None      # None = use the device context default
    self.textAlignment = wx.ALIGN_LEFT
    self.cellPadding = None    # per-cell padding; set via SetCellPadding()
    self.gridPen = None        # pen used to draw the cell grid, if any
#-------------------------------------------------------------------------
# Accessing
# NOTE(review): these getters/setters presumably back wx-style properties
# defined further down in this class (outside this excerpt) -- confirm
# before renaming any of them.

def GetFont(self):
    """
    Return the font used by this format
    """
    return self.font

def SetFont(self, font):
    """
    Set the font used by this format
    """
    self.font = font

def GetTextAlignment(self):
    """
    Return the alignment of text in this format
    """
    return self.textAlignment

def SetTextAlignment(self, alignment):
    """
    Set the alignment of text in this format
    """
    self.textAlignment = alignment

def GetTextColor(self):
    """
    Return the color of text in this format
    """
    return self.textColor

def SetTextColor(self, color):
    """
    Set the color of text in this format
    """
    self.textColor = color

def GetPadding(self):
    """
    Get the padding around this format
    """
    return self.padding

def SetPadding(self, padding):
    """
    Set the padding around this format

    Padding is either a single numeric (indicating the values on all sides)
    or a collection of paddings [left, top, right, bottom]
    """
    # _MakePadding() normalises the two accepted forms; it is defined
    # further down in this class (outside this excerpt).
    self.padding = self._MakePadding(padding)

def GetCellPadding(self):
    """
    Get the padding around cells in this format
    """
    return self.cellPadding
def SetCellPadding(self, padding):
"""
Set the padding around cells in this format
Padding is either a | |
#!/usr/bin/python3
"""
Copyright (c) 2015 <NAME> - BVS
"""
import datetime
import light
import usartcomm
# Switch states tracked in LightMaster.switch_state. These presumably mirror
# the usartcomm.OFF/ON/CHANGE protocol codes -- confirm against usartcomm.
SWITCH_STATE_OFF = 0     # all lights forced off
SWITCH_STATE_ON = 1      # all lights forced on
SWITCH_STATE_CHANGE = 2  # "change"/automatic mode (initial state)
class LightMaster():
"""
Take care off all communication with the slave lights
"""
def __init__(self, id=102):
    """Create the master and open the serial link to the slaves.

    id -- address of this master in the protocol; incoming messages are
          matched against it in process_message().
          (NOTE(review): 'id' shadows the builtin, but it is part of the
          public signature so it is kept as-is.)
    """
    self.id = id
    self.serial = usartcomm.UsartComm()
    # True while loop() is running.
    self.ok = False
    # Slave in communication with the master, paired or not.
    self.slaves = []
    # list of messages to be sent.
    self.messages = []
    # list of pairing messages to be sent.
    self.messages_pairing = []
    # Not used for the moment
    self.acknowledgment = []
    # State, managed by the switch.
    self.switch_state = SWITCH_STATE_CHANGE
    # To save the slaves state.
    self.save_slaves = []
def loop(self):
    """
    Main loop of the application.

    Repeatedly reads 5-byte frames from the serial link and dispatches
    complete ones to process_message(). Runs until self.ok is cleared.
    """
    self.ok = True
    while self.ok:
        #print("----")
        #print("INTER:", self.switch_state)
        #self.print_slaves()
        #self.print_messages()
        # Read one 5-byte frame; None and b'' indicate no data / timeout.
        ret = self.serial.read(5)
        if ret is None:
            print("Pas de message")
            continue
        elif ret == b'':
            print("timeout")
            continue
        # Only complete frames are processed; short reads are dropped.
        if len(ret) == 5:
            self.process_message(ret)
def process_message(self, message):
    """
    Process a slave's message.

    message -- raw 5-byte frame: [destination, source/slave id, command,
               data1, data2] (layout inferred from the uses below --
               confirm against the usartcomm protocol definition).

    Returns -1 for frames addressed to someone else, None otherwise.
    """
    message = self.message_to_int(message)
    #print("message :", message)
    # Ignore frames not addressed to this master (0xFF = broadcast).
    if message[0] != self.id and message[0] != 0xFF:
        print("message pour un autre destinataire", message[0])
        self.print_message(message)
        return -1
    # Message from the associated switch.
    # TODO : make a function.
    if message[0] == 0xFF and message[1] == self.id :
        print("Message from the switch:", message[2])
        # Same state
        if message[2] == self.switch_state :
            print(" - Deja dans cet etat")
            # NOTE(review): rebinding 'message' here clobbers the original
            # frame, so the OFF/ON/CHANGE comparisons below then see the
            # ACK reply instead of the switch command -- confirm intent.
            message = [0xFF, self.id,
                       message[2] | usartcomm.ACK | usartcomm.M_NOT_OK,
                       0xFF, 0xFF]
            self.send_message(message)
        if message[2] == usartcomm.OFF:
            print("cmd SWITCH - Toutes les lumieres : off")
            self.switch_to_state_OFF()
        elif message[2] == usartcomm.ON:
            print("cmd SWITCH - Toutes les lumieres : on")
            self.switch_to_state_ON()
        elif message[2] == usartcomm.CHANGE:
            print("cmd SWITCH - Toutes les lumieres : auto")
            self.switch_to_state_CHANGE()
        return
    id_slave = message[1]
    # Record when we last heard from this slave (creates it if unknown).
    self.update_light_time(message)
    if message[2] == usartcomm.PRESENCE:
        # Presence report from the light
        self.check_command(id_slave)
    elif message[2] == usartcomm.DEMAND_PAIRING:
        # Pairing request
        self.check_pairing(id_slave)
    elif message[2] & usartcomm.ACK == usartcomm.ACK:
        # Acknowledgment of a command
        if message[2] & usartcomm.RES == usartcomm.S_NOT_OK:
            print("There is an error for the last command")
            self.print_message(message)
        self.check_command(id_slave)
    else:
        # Unknown type of message
        print("Unknown:", message)
        pass
def slave_index(self, message):
"""
return the index of the slave in self.slaves
If the a slave is not found a new one is created with his state
list(int) : message send by the slave
"""
id_slave = message[1]
i = self._slave_index(id_slave)
if i == -1:
print("création du slave {}".format(id_slave))
self.slaves.append(light.Light(id_slave))
#must be from a presence or a pairing message
if message[2] == usartcomm.PRESENCE:
self.slaves[i].status = message[3]
self.slaves[i].power = message[4]
elif message[2] == usartcomm.DEMAND_PAIRING:
self.slaves[i].status = 2
else:
print("Message not conform, Light not initilised")
return i
def slave_index_sec(self, id_slave):
"""
return the index of the slave in self.slaves
If the a slave is not found throw an exeption
"""
i = self._slave_index(id_slave)
if i == -1:
raise IndexError("This slave {} do not exist".format(id_slave))
pass
return i
def _slave_index(self, id_slave):
"""
return the index of the slave in self.slaves
If the a slave is not found return -1
"""
i = 0
while i < len(self.slaves):
if self.slaves[i].id == id_slave:
break
i += 1
if i == len(self.slaves):
return -1
return i
def update_light_time(self, message):
"""
update light state according to messages send by slaves
"""
index = self.slave_index(message)
self.slaves[index].time = datetime.datetime.now()
def message_to_int(self, message):
ret = []
for i in message:
ret.append(int(i))
return ret
def check_pairing(self, id_slave):
"""
Check if there a pairing for the slave
"""
#print("pairing of :", id_slave)
for i in range(len(self.messages_pairing)):
if self.messages_pairing[i][0] == id_slave:
self.send_message(self.messages_pairing[i])
del self.messages_pairing[i]
return 1
return -1
def check_command(self, id_slave):
"""
Check if there is a command demand for the slave
"""
#print("presence of :", id_slave)
for i in range(len(self.messages)):
if self.messages[i][0] == id_slave:
self.send_message(self.messages[i])
del self.messages[i]
return 1
return -1
def is_slave(self, id_slave):
"""
Indicate if the slave existe"
"""
try:
self.slave_index_sec(id_slave)
except IndexError:
return False
return True
def is_paired(self, id_slave):
"""
return True if the slave is paired, false otherwise
"""
try:
i = self.slave_index_sec(id_slave)
except IndexError:
return False
if self.slaves[i].status == 2:
return False
return True
def update_status(self, id_slave, value):
"""
Will update the data in memory
id_slave int : id of the slave
value int [0,1,2] : value to set
"""
index = self.slave_index_sec(id_slave)
self.slaves[index].status = value
def update_power(self, id_slave, value):
"""
Will update the data in memory
id_slave int : id of the slave
value int [0,1,2] : value to set
"""
index = self.slave_index_sec(id_slave)
self.slaves[index].power = value
### Command that can be sent
def cmd_appair(self, id_slave):
"""
Send a pairing acknowledgment to the slave
Add the slave to the slave liste
id_slave int: id of the slave
"""
if not self.is_slave(id_slave):
return -1
if self.is_paired(id_slave):
return -1
message = [id_slave, self.id, usartcomm.PAIRING, 0xFF, 0xFF]
self.messages_pairing.append(message)
self.update_status(id_slave, 0)
print("self.messages_pairing", self.messages_pairing)
def cmd_unappair(self, id_slave):
"""
Unappair the slave
id_slave int: id of the slave
"""
if not self.is_slave(id_slave):
return -1
if not self.is_paired(id_slave):
return -1
message = [id_slave, self.id, usartcomm.UNPAIRING, 0xFF, 0xFF]
self.add_message(message)
self.update_status(id_slave, 2)
def cmd_switch_on(self, id_slave, power=0):
"""
Switch on the slave
id_slave int: id of the slave
power int [0;255], power of the light
"""
if not self.is_slave(id_slave):
return -1
if not self.is_paired(id_slave):
return -1
message = [id_slave, self.id, usartcomm.ACK_PRES,
usartcomm.ON, power]
self.add_message(message)
self.update_status(id_slave, 1)
def cmd_switch_off(self, id_slave):
"""
Switch off the slave
id_slave int: id of the slave
"""
if not self.is_slave(id_slave):
return -1
if not self.is_paired(id_slave):
return -1
message = [id_slave, self.id, usartcomm.ACK_PRES,
usartcomm.OFF, 0xFF]
self.add_message(message)
self.update_status(id_slave, 0)
def cmd_change_power(self, id_slave, power):
"""
Change the power of a slave
id_slave int: id of the slave
power int [0;255], power of the light
"""
if not self.is_slave(id_slave):
return -1
if not self.is_paired(id_slave):
return -1
message = [id_slave, self.id, usartcomm.ACK_PRES,
usartcomm.CHANGE, power]
self.add_message(message)
self.update_power(id_slave, power)
## Switch state command
def switch_to_state_ON(self):
"""
Execute the actions when the switch is ON
"""
if self.switch_state == SWITCH_STATE_ON:
return
if self.switch_state == SWITCH_STATE_CHANGE:
self.__save_slaves_state()
for sl in self.slaves:
if not self.is_paired(sl.id):
continue
if sl.status == 0:
self.cmd_switch_on(sl.id, 255)
elif sl.status == 1:
self.cmd_change_power(sl.id, 255)
self.switch_state = SWITCH_STATE_ON
self.print_save_slaves()
def switch_to_state_OFF(self):
"""
Execute the actions when the switch is OFF
"""
if self.switch_state == SWITCH_STATE_OFF:
return
if self.switch_state == SWITCH_STATE_CHANGE:
self.__save_slaves_state()
for sl in self.slaves:
if not self.is_paired(sl.id):
continue
if sl.status == 0:
continue
self.cmd_switch_off(sl.id)
self.switch_state = SWITCH_STATE_OFF
self.print_save_slaves()
def switch_to_state_CHANGE(self):
"""
Execute the actions when the switch is CHANGE
"""
if self.switch_state == SWITCH_STATE_CHANGE:
return
states = self.__load_slaves_state()
self.switch_state = SWITCH_STATE_CHANGE
self.print_save_slaves()
def __save_slaves_state(self):
"""
save the current state of slaves.
Its saved in a list for the moment.
"""
del self.save_slaves[:]
for sl in self.slaves:
self.save_slaves.append(sl)
    def __load_slaves_state(self):
        """
        Load the messages that will have to be sent according to the
        current situation to retrieve the old configuration.
        Send the messages.
        """
        for sl in self.save_slaves:
            # NOTE(review): if a saved slave has been removed since the
            # snapshot, _slave_index returns -1 and `current` silently
            # aliases the last list entry -- confirm this cannot happen.
            current = self.slaves[self._slave_index(sl.id)]
            if not self.is_paired(sl.id):
                continue
            # Only the (saved, current) status pairs that differ need a
            # command; statuses other than 0/1 are intentionally ignored.
            if sl.status == 0:
                if current.status == 0:
                    continue
                elif current.status == 1:
                    self.cmd_switch_off(sl.id)
            elif sl.status == 1:
                if current.status == 0:
                    self.cmd_switch_on(sl.id, sl.power)
                elif current.status == 1:
                    self.cmd_change_power(sl.id, sl.power)
### Private ###
def print_slaves(self):
"""
Print the current states of all slaves
"""
print("Slave list :")
for i in self.slaves:
print(i)
def print_save_slaves(self):
"""
Print the current states of saved slaves.
"""
print("Save slave list :")
for i in self.save_slaves:
print(i)
def print_messages(self):
"""
Print the current messages to be sent
"""
print("message list :")
for i in self.messages:
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 17:15:22 2020
@author: james.z.hare
"""
import sys, os
sys.path.append(os.path.realpath('..'))
import socket
from _thread import *
import dill as pickle
import src
from src.StateTypes.TeamState import TeamStateClass
from src.Games.TeamCaptureFlagGame import TeamCaptureFlagClass
from src.AgentTypes.TeamAgents import TeamHumanAgentClass, TeamStaticRandomAgentClass, TeamUniformRandomAgentClass
from src.UnitTypes.ExampleUnit import FlagClass
from src.UnitTypes.ProjectileModule import ProjectileClass
from src.UnitTypes.SoldierModule import SoldierClass
from src.UnitTypes.TankModule import TankClass
from src.UnitTypes.TruckModule import TruckClass
from src.UnitTypes.AirplaneModule import AirplaneClass
from src.UnitTypes.WallUnitModule import WallClass
from copy import deepcopy
import random
import time
from datetime import datetime
from src.AgentTypes.RemoteAgent import RemoteTeamAgentClass
#from TeamCaptureFlagGameHealth import TeamCaptureFlagHealthClass
from src.Games.TeamAnnihilationGameHealth import TeamAnnihilationGameClass
import enum
import sys
import json
import select
import math
from reliableSockets import sendReliablyBinary, recvReliablyBinary2, emptySocket
from src.CSVOutputModule import getAgentObservations
# The first CLI argument selects the run mode for the whole script;
# anything else aborts immediately.
if len(sys.argv) >= 2:
    GameType = sys.argv[1]
    if GameType not in [ '--train', '--test' ]:
        print("Specify Game Type: --train or --test")
        sys.exit()
else:
    print("Specify Game Type: --train or --test")
    sys.exit()
# Unit-type names and their classes, in the same order; mTurn cycles
# through this order during interactive unit placement.
modules = [ 'Soldier', 'Truck', 'Tank', 'Airplane', 'Flag', 'Wall' ]
UnitClasses = [ SoldierClass, TruckClass, TankClass, AirplaneClass, FlagClass, WallClass ]
idCount = 0  # next player id to hand out
ReadyPlayers = 0  # players whose units are fully placed
NumberOfPlayers = 4
aPositions = {}  # per-player {module name: position} used during placement
mTurn = 0  # index into `modules` of the unit currently being placed
NumberOfPlayersPerTeam = [ 2, 2 ]
Health = 1  # initial health given to every unit
NumberOfUnits = [ 5, 5, 5, 5 ]  # units per player
State0 = TeamStateClass()  # game state being assembled below
GameDic = {}  # player id -> agent instance
AgentDict = {}  # player id -> {unit name: placement info}
State0.FlagPosition = {}
State0.BoardSize = ( 10, 11, 2 )  # (x, y, z); airplanes are placed at z=1
# Define the section of the board that each player may place their units
# Each table is indexed by PlayerID; an entry is
# ((x_min,x_max),(y_min,y_max),(z_min,z_max)) inclusive bounds fed to
# random.randint. The board is split into quadrants for 2v2 games.
# NOTE(review): TwoPlayerCordUnits entry 1 uses floor(h/2)-1 as both the
# lower and upper y bound (a single row) -- confirm this is intentional.
TwoPlayerCord = ( ((0,math.floor(State0.BoardSize[0]/2-1)),(0,math.floor(State0.BoardSize[1]/2-1)),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(0,math.floor(State0.BoardSize[1]/2)-1),(0,State0.BoardSize[2]-1)),
    ((0,math.floor(State0.BoardSize[0]/2-1)),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
TwoPlayerCordUnits = ( ((0,math.floor(State0.BoardSize[0]/2-1)),(math.floor(State0.BoardSize[1]/2)-3,math.floor(State0.BoardSize[1]/2)-1),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(math.floor(State0.BoardSize[1]/2)-1,math.floor(State0.BoardSize[1]/2)-1),(0,State0.BoardSize[2]-1)),
    ((0,math.floor(State0.BoardSize[0]/2)-1),(math.floor(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
TwoPlayerCordFlag = ( ((0,math.floor(State0.BoardSize[0]/2)-1),(0,math.floor(State0.BoardSize[1]/2)-4),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(0,math.floor(State0.BoardSize[1]/2)-4),(0,State0.BoardSize[2]-1)),
    ((0,math.floor(State0.BoardSize[0]/2)-1),(math.floor(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1)),
    ((math.ceil(State0.BoardSize[0]/2),State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
# One-player-per-team placement bounds. BoardSize components are ints and
# these bounds are passed to random.randint, which rejects non-integer
# arguments; the previous version used plain "/" and produced float bounds
# (e.g. 11/2-1 == 4.5). Use floor/ceil like the two-player tables above.
OnePlayerCord = ( ((0,State0.BoardSize[0]-1),(0,math.floor(State0.BoardSize[1]/2)-1),(0,State0.BoardSize[2]-1)),
    ((0,State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
OnePlayerCordUnits = ( ((0,State0.BoardSize[0]-1),(math.floor(State0.BoardSize[1]/2)-3,math.floor(State0.BoardSize[1]/2)-1),(0,State0.BoardSize[2]-1)),
    ((0,State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2),State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
OnePlayerCordFlag = ( ((0,State0.BoardSize[0]-1),(0,math.floor(State0.BoardSize[1]/2)-4),(0,State0.BoardSize[2]-1)),
    ((0,State0.BoardSize[0]-1),(math.ceil(State0.BoardSize[1]/2)+3,State0.BoardSize[1]-1),(0,State0.BoardSize[2]-1))
    )
def getIp():
    """
    Return this machine's outward-facing IPv4 address.

    Opens a UDP socket and "connects" it (no packet is sent for UDP) so
    the OS picks the outbound interface; falls back to 127.0.0.1 when
    that fails.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as probe:
        try:
            # doesn't even have to be reachable
            probe.connect(('10.255.255.255', 1))
            return probe.getsockname()[0]
        except Exception:
            return '127.0.0.1'
def placeRandomUnits(PlayerID, NumberOfUnits, UnitsCord, FlagCord):
    """
    Randomly place one unit of each type for *PlayerID*, updating the
    module-level State0 and AgentDict in place.

    Ground units (soldier, truck, tank) are re-drawn from UnitsCord until
    their positions are pairwise distinct; the airplane is placed on the
    z=1 layer; the flag is drawn from FlagCord.

    NOTE: the NumberOfUnits parameter is unused.
    NOTE(review): positions are not checked against other players' units,
    and the airplane/flag are not checked against the ground units --
    confirm that is acceptable.
    """
    global State0
    global AgentDict
    while True:
        SoldierPos = ( random.randint(UnitsCord[PlayerID][0][0],UnitsCord[PlayerID][0][1]),
            random.randint(UnitsCord[PlayerID][1][0],UnitsCord[PlayerID][1][1]),
            0 )
        TruckPos = ( random.randint(UnitsCord[PlayerID][0][0],UnitsCord[PlayerID][0][1]),
            random.randint(UnitsCord[PlayerID][1][0],UnitsCord[PlayerID][1][1]),
            0 )
        TankPos = ( random.randint(UnitsCord[PlayerID][0][0],UnitsCord[PlayerID][0][1]),
            random.randint(UnitsCord[PlayerID][1][0],UnitsCord[PlayerID][1][1]),
            0 )
        # Ground units must not overlap each other.
        if SoldierPos != TruckPos and SoldierPos != TankPos and TruckPos != TankPos:
            break
    AirplanePos = ( random.randint(UnitsCord[PlayerID][0][0],UnitsCord[PlayerID][0][1]),
        random.randint(UnitsCord[PlayerID][1][0],UnitsCord[PlayerID][1][1]),
        1 )
    FlagPos = ( random.randint(FlagCord[PlayerID][0][0],FlagCord[PlayerID][0][1]),
        random.randint(FlagCord[PlayerID][1][0],FlagCord[PlayerID][1][1]),
        0 )
    # Add the unit to the state; unit ids are PlayerID*5 + slot.
    State0.Units[(PlayerID*5 + 0)] = SoldierClass((PlayerID*5 + 0),PlayerID, Health, Position=SoldierPos, Orientation=(0,1,0))
    AgentDict[PlayerID]['Soldier'] = {"UnitID":PlayerID*5, "Position": SoldierPos, "Orientation":(0,1,0)}
    State0.Units[(PlayerID*5 + 1)] = TruckClass((PlayerID*5 + 1),PlayerID, Health, Position=TruckPos, Orientation=(0,1,0))
    AgentDict[PlayerID]['Truck'] = {"UnitID":PlayerID*5+1, "Position": TruckPos, "Orientation":(0,1,0)}
    State0.Units[(PlayerID*5 + 2)] = TankClass((PlayerID*5 + 2),PlayerID, Health, Position=TankPos, Orientation=(0,1,0))
    AgentDict[PlayerID]['Tank'] = {"UnitID":PlayerID*5+2, "Position": TankPos, "Orientation":(0,1,0)}
    State0.Units[(PlayerID*5 + 3)] = AirplaneClass((PlayerID*5 + 3),PlayerID, Health, Position=AirplanePos, Orientation=(0,1,0))
    AgentDict[PlayerID]['Airplane'] = {"UnitID":PlayerID*5+3, "Position": AirplanePos, "Orientation":(0,1,0)}
    State0.Units[(PlayerID*5 + 4)] = FlagClass((PlayerID*5 + 4),PlayerID, Health, Position=FlagPos, Orientation=(0,1,0))
    AgentDict[PlayerID]['Flag'] = {"UnitID":PlayerID*5+4, "Position": FlagPos, "Orientation":(0,1,0)}
    State0.FlagPosition[PlayerID] = FlagPos
    print(SoldierPos,TruckPos,TankPos,AirplanePos,FlagPos)
def broadcast(msg, verbose = False): # prefix is for name identification.
    """Send *msg* (already-serialised bytes) to every connected client."""
    for client_id in Connections:
        if verbose:
            print('now in ServerWithUI.py, broadcast, line 150 broadcasting msg of length ', len(msg)) # length of this message is about 190 bytes
        Connections[client_id].send(msg)
def sendMessage(msg, conn, verbose = False):
    """Encode *msg* as UTF-8 and send it over the client socket *conn*."""
    payload = msg.encode('utf-8')
    if verbose:
        print('now in ServerWithUI.py, sendMessage, line 155 sending msg of length ', len(payload))
    conn.send(payload)
    if verbose:
        print('[Sent] '+msg, conn)
def initPositions(conn, PlayerID, TeamID, FlagPositions):
    """
    Per-client placement handshake, run in a thread per connection.

    Sends the client its PlayerID and TeamID, then receives one unit
    position at a time (pickled {UnitID: Position} dicts), broadcasting
    each placement to all clients and recording it into the module-level
    State0 / AgentDict / aPositions. Blocks until every player is ready,
    then flags the game as started.

    NOTE: FlagPositions is unused.
    NOTE(review): the bare `except:` clauses swallow every error,
    including pickle/socket failures -- the second loop only exits its
    except branch once ReadyPlayers == NumberOfPlayers.
    """
    global idCount
    global ReadyPlayers
    global State0
    global mTurn
    global aPositions
    global AgentDict
    counter = 0
    AgentDict[PlayerID] = {}
    x = {"contents":"PlayerID","data":PlayerID}
    print('now in ServerWithUI, initPositions, line 167 sending contents = data : PlayerID = PlayerID length ',len(pickle.dumps(x)))
    conn.send(pickle.dumps(x))
    # Phase 1: wait for any reply from the client, then send its TeamID.
    init = True
    while init:
        try:
            readable, writable, errored = select.select([conn], [], [],0)
            for sock in readable:
                if sock is conn:
                    print('now in ServerWithUI, initPositions, line 177 receiving data') # size of data received is about 10-12 bytes
                    data = conn.recv(2048).decode('utf-8')
                    print('size of data received is: ', len(data))
                    print('[Received] '+data)
                    x = {"contents":"TeamID","data":TeamID}
                    print('now in ServerWithUI, initPositions, line 182 sending TeamID length ',len(pickle.dumps(x))) # length is about 49 bytes
                    conn.send(pickle.dumps(x))
                    init = False
                    break
        except:
            continue
    # Phase 2: receive unit placements one by one (mTurn cycles through
    # the `modules` order; counter tracks how many units this player has
    # placed -- the 5th, the flag, marks the player as ready).
    init = True
    while init:
        try:
            readable, writable, errored = select.select([conn], [], [],0)
            for sock in readable:
                if sock is conn:
                    print('now in ServerWithUI, initPositions, line 196 receiving data')
                    data = conn.recv(1024)
                    print('size of data received is: ', len(data)) # length of data received is about 18 bytes
                    # Check if a message was received from the Client
                    if data:
                        data = pickle.loads(data)
                        print(data)
                        UnitID = list(data.keys())[0]
                        Position = data[UnitID]
                        UnitID = UnitID*5 + mTurn
                        curr_mod = modules[mTurn]
                        # Advance the shared placement turn (wraps after Flag).
                        if mTurn == 4:
                            mTurn = 0
                        else:
                            mTurn += 1
                        next_mod = modules[mTurn]
                        newPos = Position
                        oldPos = aPositions[PlayerID][curr_mod]
                        # Team 0 faces +y; team 1 faces -y (flags always +y).
                        if TeamID == 0 or curr_mod == "Flag":
                            Ori = (0,1,0)
                        else:
                            Ori = (0,-1,0)
                        d = {"id":PlayerID, "UnitID": UnitID, "currMod": curr_mod, "nextMod": next_mod, "oldPos": oldPos, "newPos":newPos, "newOri":Ori,"contents":"RemoteAgent" }
                        msg = pickle.dumps(d)
                        broadcast(msg)
                        aPositions[PlayerID][curr_mod] = newPos
                        # Record the placed unit into the game state;
                        # counter selects which unit slot is being placed.
                        if counter == 0:
                            State0.Units[(PlayerID*5 + 0)] = SoldierClass((PlayerID*5 + 0),PlayerID, Health, Position=newPos, Orientation=Ori)
                            AgentDict[PlayerID]['Soldier'] = {"UnitID":(PlayerID*5 + 0), "Position": newPos, "Orientation":Ori}
                        elif counter == 1:
                            State0.Units[(PlayerID*5 + 1)] = TruckClass((PlayerID*5 + 1),PlayerID, Health, Position=newPos, Orientation=Ori)
                            AgentDict[PlayerID]['Truck'] = {"UnitID":(PlayerID*5 + 1), "Position": newPos, "Orientation":Ori}
                        elif counter == 2:
                            State0.Units[(PlayerID*5 + 2)] = TankClass((PlayerID*5 + 2),PlayerID, Health, Position=newPos, Orientation=Ori)
                            AgentDict[PlayerID]['Tank'] = {"UnitID":(PlayerID*5 + 2), "Position": newPos, "Orientation":Ori}
                        elif counter == 3:
                            State0.Units[(PlayerID*5 + 3)] = AirplaneClass((PlayerID*5 + 3),PlayerID, Health, Position=newPos, Orientation=Ori)
                            AgentDict[PlayerID]['Airplane'] = {"UnitID":(PlayerID*5 + 3), "Position": newPos, "Orientation":Ori}
                        elif counter == 4:
                            # Last unit: the flag. Player is now ready.
                            State0.Units[(PlayerID*5 + 4)] = FlagClass((PlayerID*5 + 4),PlayerID, Health, Position=newPos, Orientation=Ori)
                            AgentDict[PlayerID]['Flag'] = {"UnitID":(PlayerID*5 + 4), "Position": newPos, "Orientation":Ori}
                            State0.FlagPosition[(PlayerID)] = newPos
                            ReadyPlayers += 1
                            counter = 0
                            break
                        counter += 1
                    else:
                        continue # only executed if the inner loop did NOT break
                    break
        except:
            if ReadyPlayers == NumberOfPlayers:
                break
    # Busy-wait until all players are ready, then start the game.
    while Game.GameOn == 0:
        if ReadyPlayers == NumberOfPlayers:
            Game.GameOn = 1
    return State0 # This might be redundant?
def build_board(BoardSize):
    """Return the flat list of "x, y" cell labels for a board of *BoardSize*."""
    width, height = BoardSize[0], BoardSize[1]
    return [f'{x}, {y}' for x in range(width) for y in range(height)]
# Per-action initial Q-values used to seed the table; land and air units
# have different action sets.
land_acts = {'doNothing': -0.1,
    'turn-135': 0,
    'turn-90': 0,
    'turn-45': 0,
    'turn0': 0,
    'turn45': 0,
    'turn90': 0,
    'turn135': 0,
    'turn180': 0,
    'advance1': 0.00,
    'shoot': 0,
    'ram': 0.00}
air_acts = {'doNothing': 0,
    'turn-135': 0,
    'turn-90': 0,
    'turn-45': 0,
    'turn0': 0,
    'turn45': 0,
    'turn90': 0,
    'turn135': 0,
    'turn180': 0,
    'advance0,-2': 0.9/12,
    'advance-1,-1': 0.9/12,
    'advance0,-1': 0.9/12,
    'advance1,-1': 0.9/12,
    'advance-2,0': 0.9/12,
    'advance-1,0': 0.9/12,
    'advance0,0': 0,
    'advance1,0': 0.9/12,
    'advance2,0': 0.9/12,
    'advance-1,1': 0.9/12,
    'advance0,1': 0.9/12,
    'advance1,1': 0.9/12,
    'advance0,2': 0.9/12,
    'ascend': 0,
    'descend': 0,
    'shoot': 0.05,
    'bomb': 0.05}
# One state label per board cell (see build_board);
# QTable[kind][cell][orientation] -> {action: value}.
board = build_board(State0.BoardSize)
QTable = {}
QTable["land"] = {}
QTable["air"] = {}
orientations = ["0, 1","0, -1","1, 0","-1, 0","1, 1","1, -1","-1, 1","-1, -1"]
#populates value table (each entry gets its own copy of the action dict)
for state in board:
    QTable["land"][state] = {}
    for orient in orientations:
        QTable["land"][state][orient] = land_acts.copy()
for state in board:
    QTable["air"][state] = {}
    for orient in orientations:
        QTable["air"][state][orient] = air_acts.copy()
# Exploration rate: 0 when replaying a trained table, 0.3 when training.
eps = -1
if GameType == '--test':
    eps = 0
    # NOTE(review): this file handle is never closed.
    f = open('QTable.json')
    QTable = json.load(f)
elif GameType == '--train':
    eps = 0.3
TDAgentIDs = []
RandomAgentIDs = [0,1]
# Create team 0's agents and randomly place their units.
for RandomAgent in RandomAgentIDs:
    AgentDict[idCount] = {}
    if idCount < NumberOfPlayersPerTeam[0]:
        TeamID = 0
    else:
        TeamID = 1
    #GameDic[idCount] = TeamTDAgentClass(idCount,TeamID,QTable, None, eps)
    GameDic[idCount] = TeamUniformRandomAgentClass(idCount, TeamID)
    if NumberOfPlayersPerTeam[TeamID] == 1:
        placeRandomUnits(idCount, NumberOfUnits[idCount], OnePlayerCord, OnePlayerCordFlag)
    if NumberOfPlayersPerTeam[TeamID] == 2:
        placeRandomUnits(idCount, NumberOfUnits[idCount], TwoPlayerCord, TwoPlayerCordFlag)
    ReadyPlayers += 1
    idCount += 1
# Build a wall across row GapLocation[1], leaving one gap at x=4; the
# walls belong to an extra neutral "player" (id == NumberOfPlayers).
GapLocation = (4,5,0)
GameDic[NumberOfPlayers] = TeamUniformRandomAgentClass(NumberOfPlayers,2)
NumUnits = sum(NumberOfUnits)
AgentDict[NumberOfPlayers] = {}
for Boulder in range(State0.BoardSize[0]):
    if Boulder != GapLocation[0]:
        State0.Units[NumUnits + Boulder] = WallClass(NumUnits+Boulder, NumberOfPlayers, 1, Position=(Boulder, GapLocation[1], GapLocation[2]), Orientation=(0,1,0))
        AgentDict[NumberOfPlayers][f'Wall{Boulder}'] = {"UnitID":NumUnits + Boulder, "Position": (Boulder, GapLocation[1], GapLocation[2]), "Orientation":(0,1,0)}
def resetGame(QT,eps):
    """
    Build a fresh game for the next training iteration.

    QT  -- Q-table (currently unused; kept for interface compatibility)
    eps -- exploration rate, decayed by 0.001 per reset

    Returns (NewGame, NewState0, eps).
    """
    # Deep-copy the pristine initial state: the previous version handed
    # out a reference to InitialState, so the first game's mutations
    # leaked into every later iteration.
    NewState0 = deepcopy(InitialState)
    for idCount in [0,1]:
        NewGameDic[idCount] = TeamUniformRandomAgentClass(idCount,0)
    for idCount in [2,3]:
        #NewGameDic[idCount] = RemoteTeamAgentClass(idCount, 1, TeamHumanAgentClass, None, None, GameType)
        NewGameDic[idCount] = TeamUniformRandomAgentClass(idCount, 1)
    NewGame = TeamCaptureFlagClass(NewGameDic)
    NewGame.type = GameType
    NewState0.Players = range(len(NewGame.Agents))
    eps -= 0.001
    return NewGame,NewState0,eps
def sendResult(conn, Result):
    """
    Transmits the `Result` to the connection `conn`

    Parameters
    ----------
    conn: [socket class]
        Client that the message will be sent to
    Result: [str]
        Game result
    """
    print('now in ServerWithUI, sendResult, line 371 sending pickled Result')
    payload = pickle.dumps(Result)
    conn.send(payload)
    print("Game Over. Press Ctrl-C to exit.")
    conn.close()
# Team 1's agents: uniform-random during training as well.
# NOTE(review): when GameType == '--test', agents 2-3 are never created
# here and RandomAgentIDs/RemoteAgentIDs keep their earlier values --
# confirm the test path defines them elsewhere.
if GameType == '--train':
    RandomAgentIDs = [2,3]
    RemoteAgentIDs = []
    for Agent in RandomAgentIDs:
        AgentDict[idCount] = {}
        if idCount < NumberOfPlayersPerTeam[0]:
            TeamID = 0
        else:
            TeamID = 1
        #GameDic[idCount] = RemoteTeamAgentClass(idCount, TeamID, TeamHumanAgentClass, None, None, GameType)
        GameDic[idCount] = TeamUniformRandomAgentClass(idCount, TeamID)
        if NumberOfPlayersPerTeam[TeamID] == 1:
            placeRandomUnits(idCount, NumberOfUnits[idCount], OnePlayerCord, OnePlayerCordFlag)
        if NumberOfPlayersPerTeam[TeamID] == 2:
            placeRandomUnits(idCount, NumberOfUnits[idCount],TwoPlayerCord, TwoPlayerCordFlag)
        ReadyPlayers += 1
        idCount += 1
#Game = TeamCaptureFlagHealthClass(GameDic)
Game = TeamCaptureFlagClass(GameDic)
State0.Players = range(len(Game.Agents))
Game.type = GameType
Game.AgentIDs = {"TDAgentIDs":TDAgentIDs,"RandomAgentIDs": RandomAgentIDs,"RemoteAgentIDs":RemoteAgentIDs}
Game.caller = "Server"
# Pristine copy used by resetGame to restart each iteration.
InitialState = deepcopy(State0)
NewGameDic = {}
NumIters = 50
score = {}
for i in range(NumIters):
print("\nGame Iteration:",i)
R = | |
"""
Copyright (c) 2019 The Cereja Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import ast
import gc
import math
import os
import threading
import time
from collections import OrderedDict, defaultdict
from importlib import import_module
import importlib
import sys
import types
import random
from typing import Any, Union, List, Tuple, Sequence, Iterable, Dict
import logging
import itertools
from copy import copy
import inspect
# Needed init configs
from ..config.cj_types import ClassType, FunctionType, Number
__all__ = ['CjTest', 'camel_to_snake', 'combine_with_all', 'fill', 'get_attr_if_exists',
'get_implements', 'get_instances_of', 'import_string',
'install_if_not', 'invert_dict', 'logger_level', 'module_references', 'set_log_level', 'time_format',
'string_to_literal', 'rescale_values', 'Source', 'sample', 'obj_repr', 'truncate', 'type_table_of',
'list_methods', 'can_do', 'chunk', 'is_iterable', 'is_indexable', 'is_sequence', 'is_numeric_sequence',
'clipboard',
'sort_dict', 'dict_append', 'to_tuple', 'dict_to_tuple', 'list_to_tuple', 'group_by', 'dict_values_len',
'dict_max_value', 'dict_min_value', 'dict_filter_value', 'get_zero_mask', 'get_batch_strides', 'Thread',
'prune_values']
logger = logging.getLogger(__name__)
_DICT_ITEMS_TYPE = type({}.items())
class Thread(threading.Thread):
    """Thread that forwards the target's return value to an optional callback."""

    def __init__(self, target, args=None, kwargs=None, name=None, daemon=None, callback=None):
        # Throttle creation: block while more than 2x CPU-count threads
        # are already alive.
        while threading.active_count() > os.cpu_count() * 2:
            time.sleep(0.1)
        super().__init__(daemon=daemon, name=name)
        self._func = target
        self._args = () if args is None else args
        self._kwargs = {} if kwargs is None else kwargs
        self._callback = callback

    def run(self):
        result = self._func(*self._args, **self._kwargs)
        if self._callback:
            self._callback(result)
def is_indexable(v):
    """Return True when *v* supports ``v[key]`` item access."""
    return hasattr(v, '__getitem__')
def chunk(data: Sequence, batch_size: int = None, fill_with: Any = None, is_random: bool = False,
          max_batches: int = None) -> List[Union[Sequence, List, Tuple, Dict]]:
    """
    e.g:
    >>> import cereja as cj
    >>> data = list(range(15))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    >>> cj.chunk(data, batch_size=4)
    [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14]]
    >>> cj.chunk(data, batch_size=4, is_random=True, fill_with=0)
    [[7, 2, 11, 4], [10, 6, 1, 13], [12, 9, 5, 0], [8, 3, 14, 0]]
    >>> data = {"key1": 'value1', "key2": 'value2', "key3": 'value3', "key4": 'value4'}
    >>> cj.chunk(data, batch_size=2,is_random=True)
    [{'key3': 'value3', 'key2': 'value2'}, {'key1': 'value1', 'key4': 'value4'}]
    @param data: Iterable data
    @param batch_size: number of items per batch
    @param fill_with: Any, but isn't valid for dict
    @param is_random: shuffle data
    @param max_batches: limit number of batches
    @return: list of batches
    """
    assert is_iterable(data) and len(data) > 0, f"Chunk isn't possible, because value {data} isn't valid."
    # Nothing to do: neither a batch size nor a batch limit was requested.
    if batch_size is None and max_batches is None:
        return [data]
    # used to return the same data type
    __parser = None
    if isinstance(data, (dict, tuple, set)):
        __parser = type(data)
        data = data.items() if isinstance(data, dict) else data
    # Work on a mutable copy so shuffling/slicing never touches the input.
    data = list(data) if isinstance(data, (set, tuple, str, bytes, bytearray, _DICT_ITEMS_TYPE)) else copy(data)
    # Invalid/absent batch_size: derive it from max_batches, or take
    # everything in one batch.
    if not batch_size or batch_size > len(data) or batch_size < 1:
        if isinstance(max_batches, (int, float)) and max_batches > 0:
            batch_size = math.ceil(len(data) / max_batches)
        else:
            batch_size = len(data)
    if is_random:
        random.shuffle(data)
    if max_batches is None:
        # ceil(len/batch_size) without floats.
        max_batches = len(data) // batch_size if len(data) % batch_size == 0 else len(data) // batch_size + 1
    batches = []
    for i in range(0, len(data), batch_size):
        result = data[i:i + batch_size]
        # Pad the last batch up to batch_size when requested.
        if fill_with is not None and len(result) < batch_size:
            result += [fill_with] * (batch_size - len(result))
        batches.append(__parser(result) if __parser is not None else result)
        max_batches -= 1
        if not max_batches:
            break
    return batches
def _get_tkinter():
    """Return a Tk root window, raising ValueError when tkinter is unavailable."""
    try:
        from tkinter import Tk
    except ImportError:
        raise ValueError("Sorry. Isn't possible.")
    return Tk()
def clipboard() -> str:
    """Return the system clipboard's text content (requires a display/Tk)."""
    return _get_tkinter().clipboard_get()
def truncate(text: Union[str, bytes], k=15):
    """
    Truncate *text* to at most *k* characters, marking the cut with '....'.

    Characters are kept evenly from the start and the end. When k <= 4
    (no room for the marker) or k is not smaller than the text, the text
    is returned unchanged. The previous docstring example claimed
    ``truncate("Cereja is fun.", k=3) == 'Cer...'``, which contradicts
    the code (k=3 <= 4 returns the text unchanged).

    eg.:
    >>> import cereja as cj
    >>> cj.utils.truncate("Cereja is fun.", k=10)
    'Cer....un.'
    @param text: string or bytes
    @param k: maximum length (natural number), default is 15
    """
    assert isinstance(text, (str, bytes)), TypeError(f"{type(text)} isn't valid. Expected str or bytes")
    if k > len(text) or k <= 4:
        return text
    n = (k - 4) // 2  # chars kept on each side; 4 is the marker length
    trunc_chars = '....' if isinstance(text, str) else b'....'
    return text[:n] + trunc_chars + text[-n:]
def obj_repr(obj_, attr_limit=10, val_limit=3, show_methods=False, show_private=False, deep=3):
    """
    Build a readable multi-line repr of *obj_*'s public attributes.

    @param obj_: any object; str/bytes are truncated, plain scalars are
        returned as-is
    @param attr_limit: max attributes listed (and truncation width for
        str/bytes)
    @param val_limit: max items shown per iterable attribute
    @param show_methods: include callables when True
    @param show_private: include underscore-prefixed attributes when True
    @param deep: recursion budget; falls back to repr() when exhausted

    Never raises: any error is logged and an empty body is produced.
    """
    try:
        if isinstance(obj_, (str, bytes)):
            return truncate(obj_, k=attr_limit)
        if isinstance(obj_, (bool, float, int, complex)):
            return obj_
        rep_ = []
        if deep > 0:
            for attr_ in dir(obj_):
                if attr_.startswith('_') and not show_private:
                    continue
                obj = obj_.__getattribute__(attr_)
                if isinstance(obj, (str, bool, float, int, complex, bytes, bytearray)):
                    rep_.append(f'{attr_} = {obj_repr(obj)}')
                    continue
                if callable(obj) and not show_methods:
                    continue
                if is_iterable(obj):
                    # Show up to val_limit items of the iterable attribute.
                    temp_v = []
                    for k in obj:
                        if isinstance(obj, dict):
                            k = f'{k}:{type(obj[k])}'
                        elif is_iterable(k):
                            # NOTE(review): `deep` is decremented inside
                            # this loop, so the budget shrinks per nested
                            # item, not per recursion level -- confirm
                            # intended.
                            k = obj_repr(k, deep=deep)
                            deep -= 1
                        else:
                            k = str(k)
                        temp_v.append(k)
                        if len(temp_v) == val_limit:
                            break
                    temp_v = ', '.join(temp_v)  # fix me, if bytes ...
                    obj = f'{obj.__class__.__name__}({temp_v} ...)'
                rep_.append(f'{attr_} = {obj}')
                if len(rep_) >= attr_limit:
                    rep_.append('...')
                    break
        else:
            # Recursion budget exhausted: fall back to the builtin repr.
            return repr(obj_)
    except Exception as err:
        logger.error(err)
        rep_ = []
    rep_ = ',\n    '.join(rep_)
    __repr_template = f"""
    {rep_}
    """
    return f"{obj_.__class__.__name__} ({__repr_template})"
def can_do(obj: Any) -> List[str]:
    """
    List methods and attributes of a Python object.

    It is essentially the builtin `dir` function without the private
    (underscore-prefixed) methods and attributes.
    @param obj: Any
    @return: list of attr names sorted by name
    """
    return sorted(name for name in dir(obj) if not name.startswith('_'))
def sample(v: Sequence, k: int = None, is_random: bool = False) -> Union[list, dict, set, Any]:
    """
    Get a sample of anything sequence-like (delegates to ``chunk``).
    @param v: Any
    @param k: int, sample size
    @param is_random: default False
    @return: sample iterable of the same container type
    """
    first_batch = chunk(v, batch_size=k, is_random=is_random, max_batches=1)
    return first_batch[0]
def type_table_of(o: Union[list, tuple, dict]):
    """
    Map each element of *o* to its type.

    Lists/tuples -> {item: type(item)}; dicts -> {key: (value, type)},
    recursing into dict values; any other value -> {o: type(o)}.
    """
    if isinstance(o, (list, tuple)):
        type_table = {i: type(i) for i in o}
    elif isinstance(o, dict):
        type_table = {}
        for k, v in o.items():
            # Recurse on dict *values*: the previous code tested
            # `isinstance(o, dict)`, which is always true inside this
            # branch, so every scalar value was wrapped needlessly.
            if isinstance(v, dict):
                v = type_table_of(v)
            type_table[k] = (v, type(v))
    else:
        type_table = {o: type(o)}
    return type_table
def camel_to_snake(value: str):
    """Convert CamelCase/mixedCase *value* to snake_case."""
    pieces = (
        f'_{char}' if index and char.isupper() else char
        for index, char in enumerate(value)
    )
    return ''.join(pieces).lower()
def get_implements(klass: type):
    """Recursively collect every non-underscore-named subclass of *klass*."""
    collected = []
    for subclass in klass.__subclasses__():
        # Depth-first: grandchildren are appended before their parent.
        if subclass.__subclasses__():
            collected += get_implements(subclass)
        if not subclass.__name__.startswith('_'):
            collected.append(subclass)
    return collected
def get_instances_of(klass: type):
    """Return an iterator over every GC-tracked instance of *klass*."""
    is_instance = lambda candidate: isinstance(candidate, klass)
    return filter(is_instance, gc.get_objects())
def _invert_parser_key(key):
    # Unhashable keys (list/set/dict) are converted to tuples so they can
    # be used as dict keys by invert_dict.
    return to_tuple(key) if isinstance(key, (list, set, dict)) else key
def _invert_append(obj, k, v):
    # Append v under k (dict_append stores values in lists); when k holds
    # a single value, keep it unwrapped.
    dict_append(obj, k, v)
    if len(obj[k]) == 1:
        obj[k] = obj[k][0]
def invert_dict(dict_: Union[dict, set]) -> dict:
    """
    Inverts the key by value.

    Nested dicts are inverted recursively; list/tuple/set values fan out
    so each element becomes a key mapping back to the original key;
    duplicate inverted keys accumulate into lists.

    e.g:
    >>> example = {"a": "b", "c": "d"}
    >>> invert_dict(example)
    {"b": "a", "d": "c"}
    :return: dict
    """
    if not isinstance(dict_, dict):
        raise TypeError("Send a dict object.")
    new_dict = {}
    for key, value in dict_.items():
        # Make the key hashable before it is used as a value/key.
        key = _invert_parser_key(key)
        if isinstance(value, dict):
            # Nested dict: invert it recursively under the same key.
            if key not in new_dict:
                new_dict.update({key: invert_dict(value)})
            else:
                _invert_append(new_dict, key, invert_dict(value))
            continue
        if isinstance(value, (tuple, list, set)):
            # Fan out: each element of the collection maps back to key.
            for k in dict_[key]:
                k = _invert_parser_key(k)
                _invert_append(new_dict, k, key)
            continue
        if value not in new_dict:
            new_dict[value] = key
        else:
            # Duplicate inverted key: accumulate original keys in a list.
            value = _invert_parser_key(value)
            _invert_append(new_dict, value, key)
    return new_dict
def group_by(values, fn) -> dict:
    """
    Group items by the result of fn (a function).

    eg.
    >>> import cereja as cj
    >>> values = ['joab', 'leite', 'da', 'silva', 'Neto', 'você']
    >>> cj.group_by(values, lambda x: 'N' if x.lower().startswith('n') else 'OTHER')
    # {'OTHER': ['joab', 'leite', 'da', 'silva', 'você'], 'N': ['Neto']}
    @param values: list of values
    @param fn: a function
    """
    grouped = {}
    for item in values:
        grouped.setdefault(fn(item), []).append(item)
    return grouped
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError(f"{dotted_path} doesn't look like a module path") from err
module = import_module(module_path)
try:
return getattr(module, class_name)
| |
already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element defines no XML attributes, so there is nothing to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Populate one child element of this node: TrackingNumber,
        TrackingNumberUniqueIdentifier (both strings) or ShipDate (a date)."""
        if nodeName_ == 'TrackingNumber':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TrackingNumber')
            value_ = self.gds_validate_string(value_, node, 'TrackingNumber')
            self.TrackingNumber = value_
            self.TrackingNumber_nsprefix_ = child_.prefix
        elif nodeName_ == 'TrackingNumberUniqueIdentifier':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TrackingNumberUniqueIdentifier')
            value_ = self.gds_validate_string(value_, node, 'TrackingNumberUniqueIdentifier')
            self.TrackingNumberUniqueIdentifier = value_
            self.TrackingNumberUniqueIdentifier_nsprefix_ = child_.prefix
        elif nodeName_ == 'ShipDate':
            sval_ = child_.text
            # ShipDate is parsed into a date object, not kept as a string.
            dval_ = self.gds_parse_date(sval_)
            self.ShipDate = dval_
            self.ShipDate_nsprefix_ = child_.prefix
# end class UniqueTrackingNumber
class Weight(GeneratedsSuper):
    """The descriptive data for the heaviness of an object.

    generateDS-produced binding with two child elements: ``Units``
    (restricted to the enumeration 'KG'/'LB') and ``Value`` (a decimal).
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, Units=None, Value=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.Units = Units
        # Validation only reports through gds_collector_; it never raises.
        self.validate_WeightUnits(self.Units)
        self.Units_nsprefix_ = None
        self.Value = Value
        self.Value_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # generateDS extension hook: prefer a subclass registered in
        # CurrentSubclassModule_, then the class-level `subclass` override.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Weight)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Weight.subclass:
            return Weight.subclass(*args_, **kwargs_)
        else:
            return Weight(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Units(self):
        return self.Units
    def set_Units(self, Units):
        self.Units = Units
    def get_Value(self):
        return self.Value
    def set_Value(self, Value):
        self.Value = Value
    def validate_WeightUnits(self, value):
        """Check *value* against the WeightUnits restriction ('KG'/'LB').
        Problems are reported via gds_collector_ when validation is enabled;
        returns False on failure, True otherwise."""
        result = True
        # Validate type WeightUnits, a restriction on xs:string.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            value = value
            enumerations = ['KG', 'LB']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on WeightUnits' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                result = False
        return result
    def hasContent_(self):
        # True when at least one child element has been assigned.
        if (
            self.Units is not None or
            self.Value is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Weight', pretty_print=True):
        """Serialize this element (and its children) as XML to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Weight')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'Weight':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Weight')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Weight', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Weight'):
        # No XML attributes on this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Weight', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Units is not None:
            namespaceprefix_ = self.Units_nsprefix_ + ':' if (UseCapturedNS_ and self.Units_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUnits>%s</%sUnits>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Units), input_name='Units')), namespaceprefix_ , eol_))
        if self.Value is not None:
            namespaceprefix_ = self.Value_nsprefix_ + ':' if (UseCapturedNS_ and self.Value_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (namespaceprefix_ , self.gds_format_decimal(self.Value, input_name='Value'), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'Units':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Units')
            value_ = self.gds_validate_string(value_, node, 'Units')
            self.Units = value_
            self.Units_nsprefix_ = child_.prefix
            # validate type WeightUnits
            self.validate_WeightUnits(self.Units)
        elif nodeName_ == 'Value' and child_.text:
            sval_ = child_.text
            fval_ = self.gds_parse_decimal(sval_, node, 'Value')
            fval_ = self.gds_validate_decimal(fval_, node, 'Value')
            self.Value = fval_
            self.Value_nsprefix_ = child_.prefix
# end class Weight
class WebAuthenticationDetail(GeneratedsSuper):
    """Used in authentication of the sender's identity.

    Holds two child elements, ``ParentCredential`` and ``UserCredential``,
    each built as a WebAuthenticationCredential.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, ParentCredential=None, UserCredential=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.ParentCredential = ParentCredential
        self.ParentCredential_nsprefix_ = None
        self.UserCredential = UserCredential
        self.UserCredential_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # generateDS extension hook: prefer a registered subclass override.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, WebAuthenticationDetail)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if WebAuthenticationDetail.subclass:
            return WebAuthenticationDetail.subclass(*args_, **kwargs_)
        else:
            return WebAuthenticationDetail(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_ParentCredential(self):
        return self.ParentCredential
    def set_ParentCredential(self, ParentCredential):
        self.ParentCredential = ParentCredential
    def get_UserCredential(self):
        return self.UserCredential
    def set_UserCredential(self, UserCredential):
        self.UserCredential = UserCredential
    def hasContent_(self):
        # True when at least one credential has been assigned.
        if (
            self.ParentCredential is not None or
            self.UserCredential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='WebAuthenticationDetail', pretty_print=True):
        """Serialize this element (and its children) as XML to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('WebAuthenticationDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'WebAuthenticationDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='WebAuthenticationDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='WebAuthenticationDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='WebAuthenticationDetail'):
        # No XML attributes on this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='WebAuthenticationDetail', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.ParentCredential is not None:
            namespaceprefix_ = self.ParentCredential_nsprefix_ + ':' if (UseCapturedNS_ and self.ParentCredential_nsprefix_) else ''
            self.ParentCredential.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ParentCredential', pretty_print=pretty_print)
        if self.UserCredential is not None:
            namespaceprefix_ = self.UserCredential_nsprefix_ + ':' if (UseCapturedNS_ and self.UserCredential_nsprefix_) else ''
            self.UserCredential.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UserCredential', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'ParentCredential':
            obj_ = WebAuthenticationCredential.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ParentCredential = obj_
            obj_.original_tagname_ = 'ParentCredential'
        elif nodeName_ == 'UserCredential':
            obj_ = WebAuthenticationCredential.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.UserCredential = obj_
            obj_.original_tagname_ = 'UserCredential'
# end class WebAuthenticationDetail
class WebAuthenticationCredential(GeneratedsSuper):
"""Two part authentication string used for the sender's identity"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Key=None, Password=<PASSWORD>, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.Key = Key
self.Key_nsprefix_ = None
self.Password = Password
self.Password_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # generateDS extension hook: prefer a subclass registered in
        # CurrentSubclassModule_, then the class-level `subclass` override.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, WebAuthenticationCredential)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if WebAuthenticationCredential.subclass:
            return WebAuthenticationCredential.subclass(*args_, **kwargs_)
        else:
            return WebAuthenticationCredential(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Plain generateDS-style accessors; the attributes are also read and
    # written directly elsewhere in this module.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_Password(self):
        return self.Password
    def set_Password(self, Password):
        self.Password = Password
def hasContent_(self):
if (
self.Key is not None or
self.Password is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='WebAuthenticationCredential', pretty_print=True):
        """Serialize this element (and its children) as XML to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('WebAuthenticationCredential')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'WebAuthenticationCredential':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='WebAuthenticationCredential')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='WebAuthenticationCredential', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='WebAuthenticationCredential'):
        # This element declares no XML attributes, so this hook is a no-op.
        pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='WebAuthenticationCredential', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Key is not None:
namespaceprefix_ = self.Key_nsprefix_ + ':' if (UseCapturedNS_ and self.Key_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sKey>%s</%sKey>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')), namespaceprefix_ , eol_))
if self.Password is not None:
namespaceprefix_ = self.Password_nsprefix_ + | |
<reponame>sjzhai/Calendar-CIS422
'''
<NAME>, <NAME>, <NAME>, <NAME>
CIS 422
Simple Calendar Application
4/23/18
'''
# NOTE: before submitting the assignment, comment out the line in the
# exception handler at the bottom of this file that prints the error details;
# it is only needed while debugging.
# Reference some required python module syntax
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from time import *
import datetime
# Reference SQL database file, assist SQL file, and assist functions file
import CalendarLogic as CL
import CalendarEvent as CE
import functions as AF
# A function tool assist labels bind pop-up windows
from functools import partial
# Get boolean value assist whether close window
import builtins
import traceback
class Calendar(Frame):
def __init__(self, master, _year=None, _month=None, _day=None, _week=[], _database=None, _fulldatetime=None):
Frame.__init__(self, master)
self.year = _year
self.month = _month
self.day = _day
self.week = _week
self.database = _database
self.fulldatetime = _fulldatetime
# init 8 lists to store calendar labels, it is easy to get location
self.Daylist = []
self.Sunlist = []
self.Monlist = []
self.Tuelist = []
self.Wedlist = []
self.Thulist = []
self.Frilist = []
self.Satlist = []
self.day_list = [self.Sunlist, self.Monlist, self.Tuelist, self.Wedlist, self.Thulist, self.Frilist, self.Satlist]
self.widget_button_labelframe()
self.mainframe = LabelFrame(master)
self.mainframe.grid(row=0, column=1, padx=0, pady=15, sticky=N+W)
self.widget_date_labelframe(self.mainframe)
self.info_labelframe(self.mainframe)
self.widget_label_labelframe(self.mainframe)
self.widget_event_labelframe(self.mainframe)
    # Simple accessors for the calendar's current date/state.  The underlying
    # attributes are also read and written directly elsewhere in this class.
    def set_year(self, year):
        self.year = year
    def set_month(self, month):
        self.month = month
    def set_day(self, day):
        self.day = day
    def set_week(self, week):
        self.week = week
    def set_database(self, db):
        self.database = db
    def set_fulldatetime(self, fdatetime):
        self.fulldatetime = fdatetime
    def get_year(self):
        return self.year
    def get_month(self):
        return self.month
    def get_day(self):
        return self.day
    def get_week(self):
        return self.week
    def get_database(self):
        return self.database
    def get_fulldatetime(self):
        return self.fulldatetime
    def widget_button_labelframe(self):
        '''(None) -> None
        Create a button labelframe on left side,
        it include 'ADD', 'SAVE', 'NEW', and 'LOAD' buttons.
        '''
        def newWin():
            '''(None) -> None
            This function is command function for ADD button,
            click ADD button to pop-up new window.
            '''
            # A database name must exist (via NEW or LOAD) before events can
            # be added; 'NAME' is the untouched placeholder text.
            if self.namelabel['text'] == 'NAME':
                messagebox.showinfo(title='Add Event Alert', message="You cannot add a event without name. Please load a '.db' file by click 'LOAD' button or create a new name by click 'NEW' button.")
            else:
                self.AddInfo()
        def save_db():
            # Command for the SAVE button: persist pending changes to the DB.
            CL.save_changes(self.get_database())
            # print(CL.get_db_name(self.get_database()))
            # c = (self.get_database().cursor().execute("SELECT * FROM calendar_model;"))
            # print(c.fetchall())
            # self.get_database().commit()
        def nameDB():
            '''(None) -> None
            This function is command function for 'NEW' button,
            click NEW button to pop-up new window.
            'CONFIRM' button can store name
            '''
            def namebutton_clicked():
                '''(None) -> None
                Command function for 'CONFIRM' button,
                Pop-up ask Yes or No message box can make user double check.
                If user click 'yes', it can change the 'NAME' above the calendar
                '''
                self.quit = False
                y_or_n = messagebox.askyesno('Verify', 'Are you sure the name for new DB?')
                if y_or_n:
                    self.namelabel['text'] = self.nametext.get()
                    db_name = self.nametext.get()+'.db'
                    db = CL.load_calendar_model(db_name)
                    self.set_database(db)
                    self.name.destroy()
            if self.namelabel['text'] != 'NAME':
                messagebox.showinfo(title='NEW button Waring message', message="You already chose a name for your database.")
            else:
                # Pop up a small window asking for the new database name.
                self.name = Toplevel()
                self.name.resizable(width=False, height=False)
                self.name_label = Label(self.name, text='Input a name for a new database:').grid(row=0, column=0, sticky=W)
                self.nametext = StringVar()
                self.name_entry = Entry(self.name, width=25, textvariable=self.nametext).grid(row=1, column=0, sticky=W)
                self.name_button = Button(self.name, text='CONFIRM', width=7, height=1, command=namebutton_clicked).grid(row=2, column=0)
            '''NEED TO IMPLEMENT MORE '''
        def openfiles():
            '''(None) -> None
            open ".db" files from local directories
            '''
            filename= askopenfilename(filetypes=(("SQL", ".db"),("All files", "*.*")))
            if filename:
                db_name = CL.load_db_name(filename)
                name = db_name[:-3]
                messagebox.showinfo(title='Load File Message', message="Sucessfully Load database File: \n'%s'" % name)
                db = CL.load_calendar_model(db_name)
                self.set_database(db)
                self.namelabel['text'] = name
                # Redraw this week's events from the freshly loaded database.
                # assumes AF.get_this_week() yields 'YYYY-MM-DD' strings — TODO confirm
                week = AF.get_this_week()
                y = week[0][0:4]
                m = week[0][5:7]
                d = week[0][8:10]
                events = CL.get_new_week(db, y, m, d)
                for event in events:
                    timedate = event.timedate()
                    Time = timedate[0:10]
                    if Time in AF.get_this_week():
                        duration = event.duration()
                        # Re-pack the timestamp as YYYYMMDDhhmmss for AF.get_labels.
                        s_time = timedate[0:4]+timedate[5:7]+timedate[8:10]+timedate[11:13]+timedate[14:16]+timedate[17:]
                        labels = AF.get_labels(s_time, duration, week, self.day_list)
                        for label in labels:
                            label['text'] = event.abbreviation()
                            label['bg'] = event.category()
        self.buttonframe = LabelFrame(self.master)
        self.buttonframe.grid(row=0, column=0, padx=15, pady=15, sticky=N+W)
        self.titleLabel = Label(self.buttonframe, text='OPTIONS', borderwidth=2, relief='groove', width=8, height=1)
        self.titleLabel.grid(row=0, column=0, pady=3)
        # create "ADD" button to add event
        self.addButton = Button(self.buttonframe, text='ADD', width=6, height=1, command=newWin)
        self.addButton.grid(row=1, column=0, pady=3)
        # create "SAVE" button to save information input at the bottom
        self.saveButton = Button(self.buttonframe, text='SAVE', width=6, height=1, command=save_db)
        self.saveButton.grid(row=2, column=0, pady=3)
        # create "NEW" button to allow user input new database name
        self.newButton = Button(self.buttonframe, text='NEW', width=6, height=1, command=nameDB)
        self.newButton.grid(row=3, column=0, pady=3)
        # create "LOAD" button to retrive and show event on calendar
        self.loadButton = Button(self.buttonframe, text='LOAD', width=6, height=1, command=openfiles)
        self.loadButton.grid(row=4, column=0, pady=3)
def widget_date_labelframe(self, mainframe):
'''(mainframe) -> None
This date labelframe build on mainframe,
user can input information in entries.
'Enter' button can arrange data and show dates by change date above "Mon, Tue,...".
'''
def getDate():
'''(None) -> None
This function is command function for 'ENTER' button.
It has
'''
y = self.ytext.get().strip(' ')
m = self.mtext.get().strip(' ')
d = self.dtext.get().strip(' ')
def isLeap(year):
'''(year) -> bool
Simply input year and function determines that the year
is leap year or not, then return True or False.
'''
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
return True
else:
return False
else:
return True
else:
return False
if AF.check_input_date(y,m,d): #This will run some extra checks to make sure the checks below can run without causing errors.
if len(y) == 4 and len(m) <= 2 and len(d) <= 2: # determine input range
if int(m) > 0 and int(m) < 13: # input month number between 1 and 12
if int(m) in [1,3,4,5,7,8,10,12]: # total month day 31
if int(d) > 31 or int(d) < 1:
messagebox.showinfo(title='Input Day Warning message', message="Input Month is "+m+". Please input day number(DD) between 1 and 31.")
elif int(m) in [3,6,9,11]: # total month day 30
if int(d) > 30 or int(d) < 1:
messagebox.showinfo(title='Input Day Warning message', message="Input Month is "+m+". Please input day number(DD) between 1 and 30.")
elif int(m) == 2: # month is Feb
if isLeap(int(y)): # leap year Feb has 29 days
if int(d) > 29 or int(d) < 1:
messagebox.showinfo(title='Input Day Warning message', message="Input year, "+y+", is leap year. Please note that there are 29 days in February.")
else: # normal year Feb has 28 days
if int(d) > 28 or int(d) < 1:
messagebox.showinfo(title='Input Day Warning message', message="Please input day number(DD) between 1 and 28. Please note that there are 28 days in February.")
else:
messagebox.showinfo(title='Input Month Warning message', message="Please input month number(MM) between 1 and 12. For example: 02")
else:
if len(y) != 4:
messagebox.showinfo(title='Input Year warning message', message="Please input year number(YYYY). For example: 2018")
elif len(m) > 2:
messagebox.showinfo(title='Input Month warning message', message="Please input month number(MM). For example: 10")
elif len(d) > 2:
messagebox.showinfo(title='Input Day warning message', message="Please input day number(DD). For example: 27")
self.set_year(str(y))
self.set_month(str(m))
self.set_day(str(d))
newWeek = AF.get_weekdays(self.get_year(), self.get_month(), self.get_day())
self.set_week(newWeek)
self.clear_labels()
else:
messagebox.showinfo(title='Inputs invalid', message="Please ensure you have entered a valid YYYY-MM-DD date.")
self.dateFrame = LabelFrame(self.mainframe)
self.dateFrame.grid(row=0, column=1, padx=5, pady=10, sticky=N+W)
# create Date label
self.dateLabel = Label(self.dateFrame, text='Input Date', borderwidth=2, relief='groove')
self.dateLabel.grid(row=0, column=0, padx=4, ipadx=4)
# create year(YYYY) label and entry box
self.yLabel = Label(self.dateFrame, text='Year(YYYY)').grid(row=0, column=1, sticky=E)
self.ytext = StringVar()
self.yinput = Entry(self.dateFrame, width=5, textvariable = self.ytext).grid(row=0, column=2)
# create month(MM)label and entry box
self.mLabel = Label(self.dateFrame, text='Month(MM)').grid(row=0, column=3, sticky=E)
self.mtext = StringVar()
self.minput = Entry(self.dateFrame, width=3, textvariable=self.mtext).grid(row=0, column=4)
# create day(DD) label and entry box
self.dLabel = Label(self.dateFrame, text='Day(DD)').grid(row=0, column=5, sticky=E)
self.dtext = StringVar()
self.dinput = Entry(self.dateFrame, width=3, textvariable = self.dtext).grid(row=0, column=6)
# create "ENTER" button to jump the week calendar
self.enterButton = Button(self.dateFrame, text='ENTER', width=4, height=1, command=getDate).grid(row=0, column=7, padx=3)
    def unlock_bind(self, event):
        '''(event) -> None
        No-op click handler.  Re-binding a calendar cell's '<Button-1>'
        event to this method replaces — and therefore disables — any
        previously bound pop-up callback.
        '''
        return
def clear_labels(self):
'''(None) -> None
This function is convenient to call functions in same order repeatedly.
And function can clear all contents after some calendar actions.
'''
for label in self.day_list:
for i in range(49):
label[i]['text'] = ''
label[i]['bg'] = 'white'
label[i].bind('<Button-1>', self.unlock_bind)
self.getWeekEvent()
self.widget_label_labelframe(self.mainframe)
def info_labelframe(self, mainframe):
'''(mainframe) -> None
This info labelframe is build on mainframe,
Button 'PREV', and | |
import glob
import torch
from os import path as osp
import torch.utils.data as data
import utils.utils_video as utils_video
class VideoRecurrentTestDataset(data.Dataset):
    """Video test dataset for recurrent architectures, which takes LR video
    frames as input and output corresponding HR video frames. Modified from
    https://github.com/xinntao/BasicSR/blob/master/basicsr/data/reds_dataset.py
    Supported datasets: Vid4, REDS4, REDSofficial.
    More generally, it supports testing dataset with following structures:
    dataroot
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── subfolder2
        ├── frame000
        ├── frame001
        ├── ...
    ├── ...
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(VideoRecurrentTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        self.imgs_lq, self.imgs_gt = {}, {}
        # Either take the clip list from the meta-info file or glob every
        # subfolder under the two data roots.
        if 'meta_info_file' in opt:
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
                subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
                subfolders_gt = [osp.join(self.gt_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
            subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*')))
        for subfolder_lq, subfolder_gt in zip(subfolders_lq, subfolders_gt):
            # get frame list for lq and gt
            subfolder_name = osp.basename(subfolder_lq)
            img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
            img_paths_gt = sorted(list(utils_video.scandir(subfolder_gt, full_path=True)))
            max_idx = len(img_paths_lq)
            assert max_idx == len(img_paths_gt), (f'Different number of images in lq ({max_idx})'
                                                 f' and gt folders ({len(img_paths_gt)})')
            self.data_info['lq_path'].extend(img_paths_lq)
            self.data_info['gt_path'].extend(img_paths_gt)
            self.data_info['folder'].extend([subfolder_name] * max_idx)
            for i in range(max_idx):
                self.data_info['idx'].append(f'{i}/{max_idx}')
            # Mark the first/last num_frame//2 frames of each clip as border.
            border_l = [0] * max_idx
            for i in range(self.opt['num_frame'] // 2):
                border_l[i] = 1
                border_l[max_idx - i - 1] = 1
            self.data_info['border'].extend(border_l)
            # cache data or save the frame list
            if self.cache_data:
                print(f'Cache {subfolder_name} for VideoTestDataset...')
                self.imgs_lq[subfolder_name] = utils_video.read_img_seq(img_paths_lq)
                self.imgs_gt[subfolder_name] = utils_video.read_img_seq(img_paths_gt)
            else:
                self.imgs_lq[subfolder_name] = img_paths_lq
                self.imgs_gt[subfolder_name] = img_paths_gt
        # Find unique folder strings
        self.folders = sorted(list(set(self.data_info['folder'])))
        self.sigma = opt['sigma'] / 255. if 'sigma' in opt else 0  # for non-blind video denoising
    def __getitem__(self, index):
        # One item == one whole clip (recurrent models consume full sequences).
        folder = self.folders[index]
        if self.sigma:
            # for non-blind video denoising: build LQ by adding fixed-seed
            # Gaussian noise to GT and appending a noise-level map channel.
            if self.cache_data:
                imgs_gt = self.imgs_gt[folder]
            else:
                imgs_gt = utils_video.read_img_seq(self.imgs_gt[folder])
            torch.manual_seed(0)  # fixed seed -> reproducible noise per call
            noise_level = torch.ones((1, 1, 1, 1)) * self.sigma
            noise = torch.normal(mean=0, std=noise_level.expand_as(imgs_gt))
            imgs_lq = imgs_gt + noise
            t, _, h, w = imgs_lq.shape
            imgs_lq = torch.cat([imgs_lq, noise_level.expand(t, 1, h, w)], 1)
        else:
            # for video sr and deblurring
            if self.cache_data:
                imgs_lq = self.imgs_lq[folder]
                imgs_gt = self.imgs_gt[folder]
            else:
                imgs_lq = utils_video.read_img_seq(self.imgs_lq[folder])
                imgs_gt = utils_video.read_img_seq(self.imgs_gt[folder])
        return {
            'L': imgs_lq,
            'H': imgs_gt,
            'folder': folder,
            # NOTE(review): when cache_data is True this is the cached tensor,
            # not a list of paths — confirm downstream consumers handle both.
            'lq_path': self.imgs_lq[folder],
        }
    def __len__(self):
        # One entry per clip folder, not per frame.
        return len(self.folders)
class SingleVideoRecurrentTestDataset(data.Dataset):
    """Single video test dataset for recurrent architectures, which takes LR video
    frames as input and output corresponding HR video frames (only input LQ path).

    NOTE(review): a class with this exact name is defined again later in this
    module; the later definition shadows this one at import time — confirm
    which definition is intended.

    More generally, it supports testing dataset with following structures:
    dataroot
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── subfolder2
        ├── frame000
        ├── frame001
        ├── ...
    ├── ...
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(SingleVideoRecurrentTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        self.lq_root = opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'folder': [], 'idx': [], 'border': []}
        self.imgs_lq = {}
        # Either take the clip list from the meta-info file or glob every
        # subfolder under the LQ data root.
        if 'meta_info_file' in opt:
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
                subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
        for subfolder_lq in subfolders_lq:
            # get frame list for lq and gt
            subfolder_name = osp.basename(subfolder_lq)
            img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
            max_idx = len(img_paths_lq)
            self.data_info['lq_path'].extend(img_paths_lq)
            self.data_info['folder'].extend([subfolder_name] * max_idx)
            for i in range(max_idx):
                self.data_info['idx'].append(f'{i}/{max_idx}')
            # Mark the first/last num_frame//2 frames of each clip as border.
            border_l = [0] * max_idx
            for i in range(self.opt['num_frame'] // 2):
                border_l[i] = 1
                border_l[max_idx - i - 1] = 1
            self.data_info['border'].extend(border_l)
            # cache data or save the frame list
            if self.cache_data:
                print(f'Cache {subfolder_name} for VideoTestDataset...')
                self.imgs_lq[subfolder_name] = utils_video.read_img_seq(img_paths_lq)
            else:
                self.imgs_lq[subfolder_name] = img_paths_lq
        # Find unique folder strings
        self.folders = sorted(list(set(self.data_info['folder'])))
    def __getitem__(self, index):
        # One item == one whole clip (no GT is returned by this dataset).
        folder = self.folders[index]
        if self.cache_data:
            imgs_lq = self.imgs_lq[folder]
        else:
            imgs_lq = utils_video.read_img_seq(self.imgs_lq[folder])
        return {
            'L': imgs_lq,
            'folder': folder,
            # NOTE(review): when cache_data is True this is the cached tensor,
            # not a list of paths — confirm downstream consumers handle both.
            'lq_path': self.imgs_lq[folder],
        }
    def __len__(self):
        # One entry per clip folder, not per frame.
        return len(self.folders)
class VideoTestVimeo90KDataset(data.Dataset):
    """Video test dataset for Vimeo90k-Test dataset.
    It only keeps the center frame for testing.
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(VideoTestVimeo90KDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        # Caching is not supported for Vimeo90K; fail fast rather than
        # silently ignoring the option.
        if self.cache_data:
            raise NotImplementedError('cache_data in Vimeo90K-Test dataset is not implemented.')
        self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        # Frame indices centred on im4 within each 7-frame Vimeo clip,
        # e.g. num_frame=7 -> [1..7], num_frame=5 -> [2..6].
        neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
        with open(opt['meta_info_file'], 'r') as fin:
            subfolders = [line.split(' ')[0] for line in fin]
        for idx, subfolder in enumerate(subfolders):
            # GT is always the centre frame im4.
            gt_path = osp.join(self.gt_root, subfolder, 'im4.png')
            self.data_info['gt_path'].append(gt_path)
            lq_paths = [osp.join(self.lq_root, subfolder, f'im{i}.png') for i in neighbor_list]
            self.data_info['lq_path'].append(lq_paths)
            self.data_info['folder'].append('vimeo90k')
            self.data_info['idx'].append(f'{idx}/{len(subfolders)}')
            self.data_info['border'].append(0)
        self.pad_sequence = opt.get('pad_sequence', False)
    def __getitem__(self, index):
        lq_path = self.data_info['lq_path'][index]
        gt_path = self.data_info['gt_path'][index]
        imgs_lq = utils_video.read_img_seq(lq_path)
        img_gt = utils_video.read_img_seq([gt_path])
        img_gt.squeeze_(0)
        if self.pad_sequence:  # pad the sequence: 7 frames to 8 frames
            # Duplicate the last frame along the time dimension.
            imgs_lq = torch.cat([imgs_lq, imgs_lq[-1:,...]], dim=0)
        return {
            'L': imgs_lq,  # (t, c, h, w)
            'H': img_gt,  # (c, h, w)
            'folder': self.data_info['folder'][index],  # folder name
            'idx': self.data_info['idx'][index],  # e.g., 0/843
            'border': self.data_info['border'][index],  # 0 for non-border
            'lq_path': lq_path[self.opt['num_frame'] // 2]  # center frame
        }
    def __len__(self):
        return len(self.data_info['gt_path'])
class SingleVideoRecurrentTestDataset(data.Dataset):
"""Single Video test dataset (only input LQ path).
Supported datasets: Vid4, REDS4, REDSofficial.
More generally, it supports testing dataset with following structures:
dataroot
├── subfolder1
├── frame000
├── frame001
├── ...
    ├── subfolder2
├── frame000
├── frame001
├── ...
├── ...
For testing datasets, there is no need to prepare LMDB files.
Args:
opt (dict): Config for train dataset. It contains the following keys:
dataroot_gt (str): Data root path for gt.
dataroot_lq (str): Data root path for lq.
io_backend (dict): IO backend type and other kwarg.
cache_data (bool): Whether to cache testing datasets.
name (str): Dataset name.
meta_info_file (str): The path to the file storing the list of test
folders. If not provided, all the folders in the dataroot will
be used.
num_frame (int): Window size for input frames.
padding (str): Padding mode.
"""
def __init__(self, opt):
super(SingleVideoRecurrentTestDataset, self).__init__()
self.opt = opt
self.cache_data = opt['cache_data']
self.lq_root = opt['dataroot_lq']
self.data_info = {'lq_path': [], 'folder': [], 'idx': [], 'border': []}
# file client (io backend)
self.file_client = None
self.imgs_lq = {}
if 'meta_info_file' in opt:
with open(opt['meta_info_file'], 'r') as fin:
subfolders = [line.split(' ')[0] for line in fin]
subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
else:
subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
for subfolder_lq in subfolders_lq:
# get frame list for lq and gt
subfolder_name = osp.basename(subfolder_lq)
img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
max_idx = len(img_paths_lq)
self.data_info['lq_path'].extend(img_paths_lq)
self.data_info['folder'].extend([subfolder_name] * max_idx)
for i in range(max_idx):
self.data_info['idx'].append(f'{i}/{max_idx}')
border_l = [0] * max_idx
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Tracking particles across the globe with GLobCurrent data and FES data for M2+S2+K1+O1 tidal field
@author: <NAME>
"""
from parcels import Field, FieldSet, ParticleSet, JITParticle, Variable, AdvectionRK4, ErrorCode
from netCDF4 import Dataset
import numpy as np
import math
import datetime
from datetime import timedelta
from operator import attrgetter
""" ----- Computation of tidal currents ----- """
t0 = datetime.datetime(1900,1,1,0,0) # origin of time = 1 January 1900, 00:00:00 UTC
starttime = datetime.datetime(2002,1,1,0,0) # time when the simulation starts = 1 January 2002, 00:00:00 UTC
endtime = datetime.datetime(2014,12,31,21,0) # time when the simulation ends = 31 December 2014, 21:00:00 UTC
def TidalMotionM2S2K1O1(particle, fieldset, time, dt):
"""
Kernel that calculates tidal currents U and V due to M2, S2, K1 and O1 tide at particle location and time
and advects the particle in these currents (using Euler forward scheme)
Calculations based on Doodson (1921) and Schureman (1958)
"""
# Number of Julian centuries that have passed between t0 and time
t = ((time + fieldset.t0rel)/86400.0)/36525.0
# Define constants to compute astronomical variables T, h, s, N (all in degrees) (source: FES2014 code)
cT0 = 180.0
ch0 = 280.1895
cs0 = 277.0248
cN0 = 259.1568; cN1 = -1934.1420
deg2rad = math.pi/180.0
# Calculation of factors T, h, s at t0 (source: Doodson (1921))
T0 = math.fmod(cT0, 360.0) * deg2rad
h0 = math.fmod(ch0, 360.0) * deg2rad
s0 = math.fmod(cs0, 360.0) * deg2rad
# Calculation of V(t0) (source: Schureman (1958))
V_M2 = 2*T0 + 2*h0 - 2*s0
V_S2 = 2*T0
V_K1 = T0 + h0 - 0.5*math.pi
V_O1 = T0 + h0 - 2*s0 + 0.5*math.pi
# Calculation of factors N, I, nu, xi at time (source: Schureman (1958))
# Since these factors change only very slowly over time, we take them as constant over the time step dt
N = math.fmod(cN0 + cN1*t, 360.0) * deg2rad
I = math.acos(0.91370 - 0.03569*math.cos(N))
tanN = math.tan(0.5*N)
at1 = math.atan(1.01883 * tanN)
at2 = math.atan(0.64412 * tanN)
nu = at1 - at2
xi = -at1 - at2 + N
nuprim = math.atan(math.sin(2*I) * math.sin(nu)/(math.sin(2*I)*math.cos(nu) + 0.3347))
# Calculation of u, f at current time (source: Schureman (1958))
u_M2 = 2*xi - 2*nu
f_M2 = (math.cos(0.5*I))**4/0.9154
u_S2 = 0
f_S2 = 1
u_K1 = -nuprim
f_K1 = math.sqrt(0.8965*(math.sin(2*I))**2 + 0.6001*math.sin(2*I)*math.cos(nu) + 0.1006)
u_O1 = 2*xi - nu
f_O1 = math.sin(I)*(math.cos(0.5*I))**2/0.3800
# Fourth-order Runge-Kutta methode to advect particle in tidal currents
# ----------------------- STEP 1 -----------------------
# Tidal fields have longitudes defined from 0...360 degrees (so -180...0 --> 180...360)
if particle.lon < 0:
lon = particle.lon + 360
else:
lon = particle.lon
# Zonal amplitudes and phaseshifts at particle location and time
Uampl_M2_1 = f_M2 * fieldset.UaM2[time, lon, particle.lat, particle.depth]
Upha_M2_1 = V_M2 + u_M2 - fieldset.UgM2[time, lon, particle.lat, particle.depth]
Uampl_S2_1 = f_S2 * fieldset.UaS2[time, lon, particle.lat, particle.depth]
Upha_S2_1 = V_S2 + u_S2 - fieldset.UgS2[time, lon, particle.lat, particle.depth]
Uampl_K1_1 = f_K1 * fieldset.UaK1[time, lon, particle.lat, particle.depth]
Upha_K1_1 = V_K1 + u_K1 - fieldset.UgK1[time, lon, particle.lat, particle.depth]
Uampl_O1_1 = f_O1 * fieldset.UaO1[time, lon, particle.lat, particle.depth]
Upha_O1_1 = V_O1 + u_O1 - fieldset.UgO1[time, lon, particle.lat, particle.depth]
# Meridional amplitudes and phaseshifts at particle location and time
Vampl_M2_1 = f_M2 * fieldset.VaM2[time, lon, particle.lat, particle.depth]
Vpha_M2_1 = V_M2 + u_M2 - fieldset.VgM2[time, lon, particle.lat, particle.depth]
Vampl_S2_1 = f_S2 * fieldset.VaS2[time, lon, particle.lat, particle.depth]
Vpha_S2_1 = V_S2 + u_S2 - fieldset.VgS2[time, lon, particle.lat, particle.depth]
Vampl_K1_1 = f_K1 * fieldset.VaK1[time, lon, particle.lat, particle.depth]
Vpha_K1_1 = V_K1 + u_K1 - fieldset.VgK1[time, lon, particle.lat, particle.depth]
Vampl_O1_1 = f_O1 * fieldset.VaO1[time, lon, particle.lat, particle.depth]
Vpha_O1_1 = V_O1 + u_O1 - fieldset.VgO1[time, lon, particle.lat, particle.depth]
# Zonal and meridional tidal currents; time + fieldset.t0rel = number of seconds elapsed between t0 and time
Uvel_M2_1 = Uampl_M2_1 * math.cos(fieldset.omegaM2 * (time + fieldset.t0rel) + Upha_M2_1)
Uvel_S2_1 = Uampl_S2_1 * math.cos(fieldset.omegaS2 * (time + fieldset.t0rel) + Upha_S2_1)
Uvel_K1_1 = Uampl_K1_1 * math.cos(fieldset.omegaK1 * (time + fieldset.t0rel) + Upha_K1_1)
Uvel_O1_1 = Uampl_O1_1 * math.cos(fieldset.omegaO1 * (time + fieldset.t0rel) + Upha_O1_1)
Vvel_M2_1 = Vampl_M2_1 * math.cos(fieldset.omegaM2 * (time + fieldset.t0rel) + Vpha_M2_1)
Vvel_S2_1 = Vampl_S2_1 * math.cos(fieldset.omegaS2 * (time + fieldset.t0rel) + Vpha_S2_1)
Vvel_K1_1 = Vampl_K1_1 * math.cos(fieldset.omegaK1 * (time + fieldset.t0rel) + Vpha_K1_1)
Vvel_O1_1 = Vampl_O1_1 * math.cos(fieldset.omegaO1 * (time + fieldset.t0rel) + Vpha_O1_1)
# Total zonal and meridional velocity
U1 = Uvel_M2_1 + Uvel_S2_1 + Uvel_K1_1 + Uvel_O1_1 # total zonal velocity
V1 = Vvel_M2_1 + Vvel_S2_1 + Vvel_K1_1 + Vvel_O1_1 # total meridional velocity
# New lon + lat
lon_1, lat_1 = (particle.lon + U1*0.5*dt, particle.lat + V1*0.5*dt)
# ----------------------- STEP 2 -----------------------
if lon_1 < 0:
lon_1 += 360
# Zonal amplitudes and phaseshifts at particle location and time
Uampl_M2_2 = f_M2 * fieldset.UaM2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Upha_M2_2 = V_M2 + u_M2 - fieldset.UgM2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Uampl_S2_2 = f_S2 * fieldset.UaS2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Upha_S2_2 = V_S2 + u_S2 - fieldset.UgS2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Uampl_K1_2 = f_K1 * fieldset.UaK1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Upha_K1_2 = V_K1 + u_K1 - fieldset.UgK1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Uampl_O1_2 = f_O1 * fieldset.UaO1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Upha_O1_2 = V_O1 + u_O1 - fieldset.UgO1[time + 0.5*dt, lon_1, lat_1, particle.depth]
# Meridional amplitudes and phaseshifts at particle location and time
Vampl_M2_2 = f_M2 * fieldset.VaM2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vpha_M2_2 = V_M2 + u_M2 - fieldset.VgM2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vampl_S2_2 = f_S2 * fieldset.VaS2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vpha_S2_2 = V_S2 + u_S2 - fieldset.VgS2[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vampl_K1_2 = f_K1 * fieldset.VaK1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vpha_K1_2 = V_K1 + u_K1 - fieldset.VgK1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vampl_O1_2 = f_O1 * fieldset.VaO1[time + 0.5*dt, lon_1, lat_1, particle.depth]
Vpha_O1_2 = V_O1 + u_O1 - fieldset.VgO1[time + 0.5*dt, lon_1, lat_1, particle.depth]
# Zonal and meridional tidal currents; time + fieldset.t0rel = number of seconds elapsed between t0 and time
Uvel_M2_2 = Uampl_M2_2 * math.cos(fieldset.omegaM2 * (time + 0.5*dt + fieldset.t0rel) + Upha_M2_2)
Uvel_S2_2 = Uampl_S2_2 * math.cos(fieldset.omegaS2 * (time + 0.5*dt + fieldset.t0rel) + Upha_S2_2)
Uvel_K1_2 = Uampl_K1_2 * math.cos(fieldset.omegaK1 * (time + 0.5*dt + fieldset.t0rel) + Upha_K1_2)
Uvel_O1_2 = Uampl_O1_2 * math.cos(fieldset.omegaO1 * (time + 0.5*dt + fieldset.t0rel) + Upha_O1_2)
Vvel_M2_2 = Vampl_M2_2 * math.cos(fieldset.omegaM2 * (time + 0.5*dt + fieldset.t0rel) + Vpha_M2_2)
Vvel_S2_2 = Vampl_S2_2 * math.cos(fieldset.omegaS2 * (time + 0.5*dt + fieldset.t0rel) + Vpha_S2_2)
Vvel_K1_2 = Vampl_K1_2 * math.cos(fieldset.omegaK1 * (time + 0.5*dt + fieldset.t0rel) + Vpha_K1_2)
Vvel_O1_2 = Vampl_O1_2 * math.cos(fieldset.omegaO1 * (time + 0.5*dt + fieldset.t0rel) + Vpha_O1_2)
# Total zonal and meridional velocity
U2 = Uvel_M2_2 + Uvel_S2_2 + Uvel_K1_2 + Uvel_O1_2 # total zonal velocity
V2 = Vvel_M2_2 + Vvel_S2_2 + Vvel_K1_2 + Vvel_O1_2 # total meridional velocity
# New lon + lat
lon_2, lat_2 = (particle.lon + U2*0.5*dt, particle.lat + V2*0.5*dt)
# ----------------------- STEP 3 -----------------------
if lon_2 < 0:
lon_2 += 360
# Zonal amplitudes and phaseshifts at particle location and time
Uampl_M2_3 = f_M2 * fieldset.UaM2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Upha_M2_3 = V_M2 + u_M2 - fieldset.UgM2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Uampl_S2_3 = f_S2 * fieldset.UaS2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Upha_S2_3 = V_S2 + u_S2 - fieldset.UgS2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Uampl_K1_3 = f_K1 * fieldset.UaK1[time + 0.5*dt, lon_2, lat_2, particle.depth]
Upha_K1_3 = V_K1 + u_K1 - fieldset.UgK1[time + 0.5*dt, lon_2, lat_2, particle.depth]
Uampl_O1_3 = f_O1 * fieldset.UaO1[time + 0.5*dt, lon_2, lat_2, particle.depth]
Upha_O1_3 = V_O1 + u_O1 - fieldset.UgO1[time + 0.5*dt, lon_2, lat_2, particle.depth]
# Meridional amplitudes and phaseshifts at particle location and time
Vampl_M2_3 = f_M2 * fieldset.VaM2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Vpha_M2_3 = V_M2 + u_M2 - fieldset.VgM2[time + 0.5*dt, lon_2, lat_2, particle.depth]
Vampl_S2_3 = f_S2 * fieldset.VaS2[time + 0.5*dt, | |
<reponame>schubergphilis/cloudstack<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Virtual Machine IAM effect
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.codes import FAILED
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class Services:
    """Static test data for the VM IAM lifecycle tests.

    Bundles the domain/account fixtures, VM definitions, service offering
    and IAM group/policy descriptions consumed by TestVMIam.
    """

    def __init__(self):
        def account(tag):
            # All test accounts share one shape; only the name fields vary.
            return {
                "email": "<EMAIL>",
                "firstname": tag,
                "lastname": "User",
                "username": tag,
                "password": "password",
            }

        def vm(tag):
            # VM name is "<tag>vm"; display name capitalizes the tag.
            return {
                "name": tag + "vm",
                "displayname": tag[0].upper() + tag[1:] + " VM",
            }

        self.services = {
            # data for domains and accounts
            "domain1": {"name": "Domain1"},
            "account1A": account("test1A"),
            "account1B": account("test1B"),
            "domain2": {"name": "Domain2"},
            "account2A": account("test2A"),
            # data required for virtual machine creation
            "virtual_machine1A": vm("test1A"),
            "virtual_machine1B": vm("test1B"),
            "virtual_machine2A": vm("test2A"),
            # small service offering
            "service_offering": {
                "small": {
                    "name": "Small Instance",
                    "displaytext": "Small Instance",
                    "cpunumber": 1,
                    "cpuspeed": 100,
                    "memory": 128,
                },
            },
            "ostype": 'CentOS 5.6 (64-bit)',
            # iam group and policy information
            "service_desk_iam_grp": {
                "name": "Service Desk",
                "description": "Service Desk IAM Group",
            },
            "vm_readonly_iam_policy": {
                "name": "VM Read Only Access",
                "description": "VM read only access iam policy",
            },
        }
class TestVMIam(cloudstackTestCase):
    @classmethod
    def setUpClass(self):
        """Create the shared fixtures for all tests in this class.

        Builds: two domains, three user accounts (1A/1B in domain_1, 2A in
        domain_2) with registered API keys, a small service offering, one VM
        per account, and the IAM group/policies used by the grant/revoke
        tests. Everything is tracked in ``_cleanup`` for tearDownClass.

        NOTE(review): the classmethod parameter is named ``self`` but is
        bound to the class; conventionally it should be ``cls``.
        """
        testClient = super(TestVMIam, self).getClsTestClient()
        self.apiclient = testClient.getApiClient()
        self.services = Services().services
        # backup default apikey and secretkey so tests can impersonate users
        # and tearDown can restore admin credentials
        self.default_apikey = self.apiclient.connection.apiKey
        self.default_secretkey = self.apiclient.connection.securityKey
        # Create domains and accounts etc
        self.domain_1 = Domain.create(
            self.apiclient,
            self.services["domain1"]
        )
        self.domain_2 = Domain.create(
            self.apiclient,
            self.services["domain2"]
        )
        # Create two accounts for domain_1
        self.account_1A = Account.create(
            self.apiclient,
            self.services["account1A"],
            admin=False,
            domainid=self.domain_1.id
        )
        self.account_1B = Account.create(
            self.apiclient,
            self.services["account1B"],
            admin=False,
            domainid=self.domain_1.id
        )
        # Create an account for domain_2
        self.account_2A = Account.create(
            self.apiclient,
            self.services["account2A"],
            admin=False,
            domainid=self.domain_2.id
        )
        # Fetch user details to register apiKey for them
        self.user_1A = User.list(
            self.apiclient,
            account=self.account_1A.name,
            domainid=self.account_1A.domainid
        )[0]
        user_1A_key = User.registerUserKeys(
            self.apiclient,
            self.user_1A.id
        )
        self.user_1A_apikey = user_1A_key.apikey
        self.user_1A_secretkey = user_1A_key.secretkey
        self.user_1B = User.list(
            self.apiclient,
            account=self.account_1B.name,
            domainid=self.account_1B.domainid
        )[0]
        user_1B_key = User.registerUserKeys(
            self.apiclient,
            self.user_1B.id
        )
        self.user_1B_apikey = user_1B_key.apikey
        self.user_1B_secretkey = user_1B_key.secretkey
        self.user_2A = User.list(
            self.apiclient,
            account=self.account_2A.name,
            domainid=self.account_2A.domainid
        )[0]
        user_2A_key = User.registerUserKeys(
            self.apiclient,
            self.user_2A.id
        )
        self.user_2A_apikey = user_2A_key.apikey
        self.user_2A_secretkey = user_2A_key.secretkey
        # create service offering
        self.service_offering = ServiceOffering.create(
            self.apiclient,
            self.services["service_offering"]["small"]
        )
        self.zone = get_zone(self.apiclient, testClient.getZoneForTests())
        self.services['mode'] = self.zone.networktype
        self.template = get_template(self.apiclient, self.zone.id, self.services["ostype"])
        # deploy 3 VMs for three accounts
        self.virtual_machine_1A = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine1A"],
            accountid=self.account_1A.name,
            zoneid=self.zone.id,
            domainid=self.account_1A.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id
        )
        self.virtual_machine_1B = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine1B"],
            accountid=self.account_1B.name,
            zoneid=self.zone.id,
            domainid=self.account_1B.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id
        )
        self.virtual_machine_2A = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine2A"],
            accountid=self.account_2A.name,
            zoneid=self.zone.id,
            domainid=self.account_2A.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id
        )
        # IAM group with a read-only VM policy; tests add/remove permissions
        self.srv_desk_grp = IAMGroup.create(
            self.apiclient,
            self.services["service_desk_iam_grp"]
        )
        self.vm_read_policy = IAMPolicy.create(
            self.apiclient,
            self.services["vm_readonly_iam_policy"]
        )
        self.srv_desk_grp.attachPolicy(
            self.apiclient, [self.vm_read_policy]
        )
        vm_grant_policy_params = {}
        vm_grant_policy_params['name'] = "policyGrantVirtualMachine" + self.virtual_machine_1A.id
        vm_grant_policy_params['description'] = "Policy to grant permission to VirtualMachine " + self.virtual_machine_1A.id
        self.vm_grant_policy = IAMPolicy.create(
            self.apiclient,
            vm_grant_policy_params
        )
        # Resources deleted (in this order) by tearDownClass.
        self._cleanup = [
            self.account_1A,
            self.account_1B,
            self.domain_1,
            self.account_2A,
            self.domain_2,
            self.service_offering,
            self.vm_read_policy,
            self.srv_desk_grp,
            self.vm_grant_policy
        ]
@classmethod
def tearDownClass(self):
self.apiclient = super(TestVMIam, self).getClsTestClient().getApiClient()
cleanup_resources(self.apiclient, self._cleanup)
return
    def setUp(self):
        """Per-test setup: fresh API/DB clients and an empty cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
    def tearDown(self):
        """Per-test teardown: restore admin keys, delete per-test resources."""
        # restore back default apikey and secretkey (tests swap in user keys)
        self.apiclient.connection.apiKey = self.default_apikey
        self.apiclient.connection.securityKey = self.default_secretkey
        cleanup_resources(self.apiclient, self.cleanup)
        return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "selfservice"])
def test_01_list_own_vm(self):
# listVM command should return owne's VM
self.debug("Listing VM for account: %s" % self.account_1A.name)
self.apiclient.connection.apiKey = self.user_1A_apikey
self.apiclient.connection.securityKey = self.user_1A_secretkey
list_vm_response = list_virtual_machines(
self.apiclient
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(list_vm_response),
1,
"Check VM available in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].name,
self.virtual_machine_1A.name,
"Virtual Machine names do not match"
)
self.debug("Listing VM for account: %s" % self.account_1B.name)
self.apiclient.connection.apiKey = self.user_1B_apikey
self.apiclient.connection.securityKey = self.user_1B_secretkey
list_vm_response = list_virtual_machines(
self.apiclient
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(list_vm_response),
1,
"Check VM available in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].name,
self.virtual_machine_1B.name,
"Virtual Machine names do not match"
)
self.debug("Listing VM for account: %s" % self.account_2A.name)
self.apiclient.connection.apiKey = self.user_2A_apikey
self.apiclient.connection.securityKey = self.user_2A_secretkey
list_vm_response = list_virtual_machines(
self.apiclient
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(list_vm_response),
1,
"Check VM available in List Virtual Machines"
)
self.assertEqual(
list_vm_response[0].name,
self.virtual_machine_2A.name,
"Virtual Machine names do not match"
)
return
    @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "selfservice"])
    def test_02_grant_domain_vm(self):
        """Grant domain-scoped VM read access and verify the grantee's listing.

        Adds account_1B to the service-desk IAM group, grants the group's
        read policy DOMAIN scope over domain_2, then lists VMs as account_1B
        and expects both its own VM and domain_2's VM.
        """
        # Validate the following
        # 1. Grant domain2 VM access to account_1B
        # 2. listVM command should return account_1B and domain_2 VMs.
        self.debug("Granting Domain %s VM read only access to account: %s" % (self.domain_2.name, self.account_1B.name))
        self.srv_desk_grp.addAccount(self.apiclient, [self.account_1B])
        domain_permission = {}
        domain_permission['action'] = "listVirtualMachines"
        domain_permission['entitytype'] = "VirtualMachine"
        domain_permission['scope'] = "DOMAIN"
        domain_permission['scopeid'] = self.domain_2.id
        self.vm_read_policy.addPermission(self.apiclient, domain_permission)
        self.debug("Listing VM for account: %s" % self.account_1B.name)
        # Act as account_1B for the listing.
        self.apiclient.connection.apiKey = self.user_1B_apikey
        self.apiclient.connection.securityKey = self.user_1B_secretkey
        list_vm_response = list_virtual_machines(
            self.apiclient
        )
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertEqual(
            len(list_vm_response),
            2,
            "Check VM available in List Virtual Machines"
        )
        list_vm_names = [list_vm_response[0].name, list_vm_response[1].name]
        self.assertEqual( self.virtual_machine_1B.name in list_vm_names,
            True,
            "Accessible Virtual Machine names do not match"
        )
        self.assertEqual( self.virtual_machine_2A.name in list_vm_names,
            True,
            "Accessible Virtual Machine names do not match"
        )
        return
    @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "selfservice"])
    def test_03_grant_account_vm(self):
        """Grant account-scoped VM read access and verify the grantee's listing.

        NOTE: expects 3 VMs, which includes domain_2's VM granted in
        test_02 — this test depends on test_02 having run first.
        """
        # Validate the following
        # 1. Grant account_1A VM access to account_1B
        # 2. listVM command should return account_1A and account_1B VMs.
        self.debug("Granting Account %s VM read only access to account: %s" % (self.account_1A.name, self.account_1B.name))
        account_permission = {}
        account_permission['action'] = "listVirtualMachines"
        account_permission['entitytype'] = "VirtualMachine"
        account_permission['scope'] = "ACCOUNT"
        account_permission['scopeid'] = self.account_1A.id
        self.vm_read_policy.addPermission(self.apiclient, account_permission)
        self.debug("Listing VM for account: %s" % self.account_1B.name)
        # Act as account_1B for the listing.
        self.apiclient.connection.apiKey = self.user_1B_apikey
        self.apiclient.connection.securityKey = self.user_1B_secretkey
        list_vm_response = list_virtual_machines(
            self.apiclient
        )
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertEqual(
            len(list_vm_response),
            3,
            "Check VM available in List Virtual Machines"
        )
        list_vm_names = [list_vm_response[0].name, list_vm_response[1].name, list_vm_response[2].name]
        self.assertEqual( self.virtual_machine_1B.name in list_vm_names,
            True,
            "Accessible Virtual Machine names do not match"
        )
        self.assertEqual( self.virtual_machine_1A.name in list_vm_names,
            True,
            "Accessible Virtual Machine names do not match"
        )
        self.assertEqual( self.virtual_machine_2A.name in list_vm_names,
            True,
            "Accessible Virtual Machine names do not match"
        )
        return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "selfservice"])
def test_04_revoke_account_vm(self):
# Validate the following
# 1. Revoke account_1A VM access from account_1B
# 2. listVM command should not return account_1A VMs.
self.debug("Revoking Account %s VM read only access from account: %s" % (self.account_1A.name, self.account_1B.name))
account_permission = {}
account_permission['action'] = "listVirtualMachines"
account_permission['entitytype'] = "VirtualMachine"
account_permission['scope'] = "ACCOUNT"
account_permission['scopeid'] = self.account_1A.id
self.vm_read_policy.removePermission(self.apiclient, account_permission)
self.debug("Listing VM for account: %s" % self.account_1B.name)
self.apiclient.connection.apiKey = self.user_1B_apikey
self.apiclient.connection.securityKey = self.user_1B_secretkey
list_vm_response = list_virtual_machines(
self.apiclient
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(list_vm_response),
2,
"Check VM available in List Virtual Machines"
)
list_vm_names = [list_vm_response[0].name, list_vm_response[1].name]
self.assertEqual( self.virtual_machine_1A.name in list_vm_names,
False,
"Accessible Virtual Machine names do not match"
)
return
@attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "selfservice"])
def test_05_revoke_domain_vm(self):
# Validate the following
# 1. | |
<gh_stars>1-10
from gurobipy import GRB, Model, quicksum, tuplelist
from itertools import combinations
# subtour Function gets the shortest cycle in TupleList input "edges"
# Original from https://www.gurobi.com/documentation/9.0/examples/tsp_py.html
def subtour(edges, n=None):
    """Return the shortest cycle found in *edges*.

    Args:
        edges: directed (i, j) arcs; only the gurobipy ``tuplelist``
            interface ``edges.select(i, '*')`` is used.
        n: number of nodes. Defaults to the module-global
            ``houses_per_period`` (set by ``route_me_for.result_helper``)
            for backward compatibility with existing call sites.

    Returns:
        list[int]: node sequence of the smallest cycle.
    """
    if n is None:
        # Legacy behavior: read the global configured by route_me_for.
        n = houses_per_period
    unvisited = list(range(n))
    # Sentinel longer than any possible cycle so the first one always wins.
    cycle = range(n + 1)
    while unvisited:
        thiscycle = []
        neighbors = unvisited
        while neighbors:
            current = neighbors[0]
            thiscycle.append(current)
            unvisited.remove(current)
            # Follow outgoing arcs to nodes not yet visited.
            neighbors = [j for i, j in edges.select(current, '*') if j in unvisited]
        if len(cycle) > len(thiscycle):
            cycle = thiscycle
    return cycle
#route_me_for class that optimizes route for <parameters> under <constraints>
class route_me_for():
    def __init__(self, WORKABLE_DAYS, PERIODS, TOTAL_HOUSES, DIJ_PARAM,
                 QUIX_DEMAND, OMO_DEMAND, VELOCITY, DATASET,
                 HOUSES_PER_DAY, HOUSES_PER_PERIOD, BIG_M, RESULT_ID):
        """Store the instance data, then build and solve setup in sequence.

        NOTE(review): WORKABLE_DAYS and PERIODS arguments are ignored and
        hard-coded to 1 — confirm this is intentional (single-day,
        single-period model).
        """
        self.WORKABLE_DAYS = 1
        self.PERIODS = 1
        self.TOTAL_HOUSES = TOTAL_HOUSES
        # Distance matrix Dij (indexable as _Dij[i][j]).
        self._Dij = DIJ_PARAM
        self.QUIX_DEMAND = QUIX_DEMAND
        self.OMO_DEMAND = OMO_DEMAND
        self.VELOCITY = VELOCITY
        self.DATASET = DATASET
        self.HOUSES_PER_DAY = HOUSES_PER_DAY
        self.HOUSES_PER_PERIOD = HOUSES_PER_PERIOD
        self.result_id = RESULT_ID
        # Big-M constant for the request/binary linking constraints.
        self._M = BIG_M
        # Build the model step by step; order matters (sets before
        # variables, variables before constraints).
        self.set_model()
        self.result_helper()
        self.set_setter()
        self.set_parameters()
        self.set_variables()
        self.get_x()
        self.set_constraints()
        self.set_var_relations()
        self.set_optimize()
        self.fix_sub()
    # Defines model
    def set_model(self):
        """Create the underlying Gurobi model instance."""
        self.m = Model()
    # Makes getting the final result simpler
    def result_helper(self):
        """Initialize the module-level globals used by the solver callback.

        ``houses_per_period`` feeds ``subtour``; ``lazy_glob`` and
        ``callbacks_glob`` count lazy constraints and callback invocations
        added by ``subtourelim``.
        """
        global lazy_glob, callbacks_glob, houses_per_period
        houses_per_period = self.HOUSES_PER_PERIOD
        lazy_glob = 0
        callbacks_glob = 0
    # Sets Sets
    def set_setter(self):
        """Define the index sets of the model (houses, days, periods, data)."""
        # Refer to: N (houses in one period)
        self._N = list(range(0, self.HOUSES_PER_PERIOD))
        # Refer to: T (workable days)
        self._T = list(range(0, self.WORKABLE_DAYS))
        # Refer to: P (periods)
        self._P = list(range(0, self.PERIODS))
        # Refer to: N/a
        # Type: Python List of List of Lists of Dictionaries imported from datasets
        # Description: Represents each house as a dictionary with its attributes as keys
        self._C = self.DATASET
    # Refer to: Wjpt
    def _Wjpt(self, j,p,t):
        """Attribute 'Wjpt' of house j in period p on day t from the dataset."""
        return self._C[t][p][j]['Wjpt']
    # Refer to: Ujpt
    def _Ujpt(self, j,p,t):
        """Attribute 'Ujpt' of house j in period p on day t from the dataset."""
        return self._C[t][p][j]['Ujpt']
    # Sets Parameters
    def set_parameters(self):
        """Define the scalar parameters of the model."""
        # Refer to: N/a
        # Type: Python int
        # Description: Defines how many houses are expected to be in a day
        self._H = self.HOUSES_PER_DAY
        # Refer to: N/a
        # Type: Python int
        # Description: Defines how many houses are expected to be in a period
        self._G = self.HOUSES_PER_PERIOD
        # Refer to: fp (fixed constant; units not stated here — TODO confirm)
        self._fp = 60
        # Refer to: V (vehicle velocity)
        self._V = self.VELOCITY
        # Refer to: a (Omo demand)
        self._a = self.OMO_DEMAND
        # Refer to: b (Quix demand)
        self._b = self.QUIX_DEMAND
    # Sets variables
    def set_variables(self):
        """Declare the decision variables on the Gurobi model."""
        # Refer to: Xijpt — binary routing arc i->j in period p on day t
        self._Xijpt = self.m.addVars(self._N, self._N, self._P, self._T, vtype=GRB.BINARY, name='Xijpt')
        # Refer to: Ajpt — non-negative integer stock/level A at house j
        self._Ajpt = self.m.addVars(self._N, self._P, self._T, lb=0, vtype=GRB.INTEGER, name='Ajpt')
        # Refer to: Bjpt — non-negative integer stock/level B at house j
        self._Bjpt = self.m.addVars(self._N, self._P, self._T, lb=0, vtype=GRB.INTEGER, name='Bjpt')
        # Refer to: Zjpt — visit indicator
        # NOTE(review): the Gurobi name string is 'Zijpt' while the
        # attribute is _Zjpt — looks like a typo in the label; confirm
        # before relying on variable names in solution files.
        self._Zjpt = self.m.addVars(self._N, self._P, self._T, vtype=GRB.BINARY, name='Zijpt')
        # Refer to: Ojpt — Omo request indicator
        self._Ojpt = self.m.addVars(self._N, self._P, self._T, vtype=GRB.BINARY, name='Ojpt')
        # Refer to: Qjpt — Quix request indicator
        self._Qjpt = self.m.addVars(self._N, self._P, self._T, vtype=GRB.BINARY, name='Qjpt')
        # Refer to: Hjpt — non-negative integer auxiliary variable
        self._Hjpt = self.m.addVars(self._N, self._P, self._T, lb=0, vtype=GRB.INTEGER, name='Hjpt')
    # Sets constraints
    def set_constraints(self):
        """Add the routing and inventory constraints to the model."""
        # Refer to: 1. Unique exit — every house is left exactly once
        for i in self._N:
            self.m.addConstr(sum(self._Xijpt[i, j, p, t] for j in self._N for t in self._T for p in self._P if i != j) == 1)
        # Refer to: 2. Unique entry — every house is entered exactly once
        for j in self._N:
            self.m.addConstr(sum(self._Xijpt[i, j, p, t] for t in self._T for p in self._P for i in self._N if j != i) == 1)
        # 3. Max houses in a day cannot be more than max house amount
        for t in self._T:
            self.m.addConstr(sum(self._Xijpt[i, j, p, t] for p in self._P for i in self._N for j in self._N if j != i) <= self._H)
        # 4. Max houses in a period cannot be more than max house amount
        for t in self._T:
            for p in self._P:
                self.m.addConstr(sum(self._Xijpt[i, j, p, t] for i in self._N for j in self._N if j != i) <= self._G)
        # 5. i,j House route must belong to set day
        # dia_aux partitions the house indices into consecutive chunks of
        # HOUSES_PER_DAY, one chunk per day; arcs crossing chunks are fixed to 0.
        dia_aux = [self._N[x:x + self.HOUSES_PER_DAY] for x in range(0, len(self._N), self.HOUSES_PER_DAY)]
        for t in self._T:
            for p in self._P:
                for i in self._N:
                    for j in self._N:
                        if i not in dia_aux[t]:
                            self.m.addConstr(self._Xijpt[i, j, p, t] == 0)
                        if j not in dia_aux[t]:
                            self.m.addConstr(self._Xijpt[i, j, p, t] == 0)
        # 6. i,j House route must belong to set period
        # periods[k] is the period assigned to house k, cycling 0..PERIODS-1.
        period_aux = [self._N[x:x + self.PERIODS] for x in range(0, len(self._N), self.PERIODS)]
        counter = 0
        for day in period_aux:
            for i in range(len(day)):
                day[i] = counter
                if counter == self.PERIODS-1:
                    counter = 0
                else:
                    counter+=1
        periods = []
        for sublist in period_aux:
            for element in sublist:
                periods.append(element)
        for t in self._T:
            for p in self._P:
                for i in self._N:
                    for j in self._N:
                        if periods[i] != p:
                            self.m.addConstr(self._Xijpt[i, j, p, t] == 0)
                        if periods[j] != p:
                            self.m.addConstr(self._Xijpt[i, j, p, t] == 0)
        # 11. The following 8 constraints were added from photographed notes.
        # B-level recursion: each next house's B equals the previous minus Wjpt.
        for t in self._T:
            for p in self._P:
                for j in self._N[:-1]:
                    self.m.addConstr(self._Bjpt[j+1, p, t] == self._Bjpt[j, p, t] - self._Wjpt(j, p, t))
        # A-level recursion: each next house's A equals the previous minus Ujpt.
        for t in self._T:
            for p in self._P:
                for j in self._N[:-1]:
                    self.m.addConstr(self._Ajpt[j+1, p, t] == self._Ajpt[j, p, t] - self._Ujpt(j, p, t))
        # An Omo delivery can only be flagged if stock A covers the request.
        for t in self._T:
            for p in self._P:
                for j in self._N:
                    self.m.addConstr(self._Ojpt[j, p, t] * self._Ujpt(j, p, t) <= self._Ajpt[j, p, t])
        # A Quix delivery can only be flagged if stock B covers the request.
        for t in self._T:
            for p in self._P:
                for j in self._N:
                    self.m.addConstr(self._Qjpt[j, p, t] * self._Wjpt(j, p, t) <= self._Bjpt[j, p, t])
# Sets variable relations
def set_var_relations(self):
# 8.1 Omo binary under request binary
for t in self._T:
for p in self._P:
for j in self._N:
self.m.addConstr(self._Zjpt[j, p, t] <= self._Ojpt[j,p,t] + self._Qjpt[j,p,t])
# 8.2 Quix binary under request binary
for t in self._T:
for p in self._P:
for j in self._N:
self.m.addConstr((self._Qjpt[j, p, t] + self._Ojpt[j,p,t]) <= self._Zjpt[j, p, t] * 2)
# 8.3 Trace binary summation?
for t in self._T:
for p in self._P:
for j in self._N:
#if i != j:
# $COMMENT If statement not recognizing i? changed to addConstr function
self.m.addConstr(sum(self._Xijpt[i, j, p, t] for i in self._N if i != j) == self._Zjpt[j, p, t])
# 8.5 Request restriction with Big-M?
for t in self._T:
for p in self._P:
for j in self._N:
self.m.addConstr(self._Ujpt(j, p, t) <= self._Ojpt[j, p, t] * self._M)
# 8.5 Request restriction with Big-M?
for t in self._T:
for p in self._P:
for j in self._N:
pass
self.m.addConstr(self._Wjpt(j, p, t) <= self._Qjpt[j, p, t] * self._M)
# Optimizes set objective and eliminates Outputs
def set_optimize(self):
obj = sum(self._Dij[i][j] * self._Xijpt[i, j, p, t] for i in self._N for j in self._N for p in self._P for t in self._T if i != j)
self.m.setObjective(obj, GRB.MINIMIZE)
self.m.Params.OutputFlag = 0
# Gets all variables named Xijpt for subtourelim and subtour methods
def get_x(self):
global var_x
var_x = []
var_x = self._Xijpt
# subtourelim staticmethod asigns Lazy restrictions if Callback and len(Subtour) satisfaction conditions are met
# Adapted from original at https://www.gurobi.com/documentation/9.0/examples/tsp_py.html
    @staticmethod
    def subtourelim(model, where):
        """Gurobi lazy-constraint callback that cuts off subtours.

        Adapted from https://www.gurobi.com/documentation/9.0/examples/tsp_py.html
        Only inspects the arc variables for (period, day) == (0, 0). Adds a
        lazy cut whenever the cycle found by ``subtour`` is shorter than the
        number of houses per period.
        """
        # Globals are required: Gurobi invokes the callback without `self`.
        global var_x, lazy_glob, callbacks_glob, houses_per_period
        n = houses_per_period
        if where == GRB.Callback.MIPSOL:
            # Collect arcs selected in the incumbent solution (values ~ 1).
            vals = model.cbGetSolution(var_x)
            selected = tuplelist()
            for i in range(houses_per_period):
                for j in range(houses_per_period):
                    if vals[i, j, 0, 0] > 0.5 :
                        selected.append((i,j))
            tour = subtour(selected)
            if len(tour) < n:
                combinationF = list(combinations(tour, 2))
                if len(combinationF) == 1:
                    # Two-node subtour: forbid using both directions of the edge.
                    expr = len(tour) - 1
                    model.cbLazy(var_x[combinationF[0][1], combinationF[0][0], 0, 0] + var_x[combinationF[0][0], combinationF[0][1], 0, 0] <= expr)
                    lazy_glob += 1
                else:
                    # General case: lazy cut over all arcs among the subtour's
                    # nodes (both directions are summed).
                    expr = len(tour) - 1
                    sumarapida = 0
                    for i in tour:
                        for j in tour:
                            sumarapida += var_x[i, j, 0, 0] + var_x[j, i, 0, 0]
                    model.cbLazy(sumarapida <= expr)
                    lazy_glob += 1
            callbacks_glob += 1
# fix_sub function optimizes for subtourelim and "actual" tuplelist of edges
# Adapted from original at https://www.gurobi.com/documentation/9.0/examples/tsp_py.html
    def fix_sub(self):
        """Optimize with the subtour-elimination callback and store the tour.

        Adapted from https://www.gurobi.com/documentation/9.0/examples/tsp_py.html
        After optimization, rebuilds the selected arc list for
        (period, day) == (0, 0), asserts a full-length tour was found, saves it
        on ``self.tourfinal`` and writes the solution file.
        """
        global var_x
        n = self.HOUSES_PER_PERIOD
        # Lazy constraints must be enabled for cbLazy to take effect.
        self.m.Params.lazyConstraints = 1
        self.m.optimize(self.subtourelim)
        selected = tuplelist()
        for i in range(self.HOUSES_PER_PERIOD):
            for j in range(self.HOUSES_PER_PERIOD):
                if var_x[i, j, 0, 0].X > 0.5:
                    selected.append((i,j))
        tour = subtour(selected)
        # The callback should have eliminated every short subtour by now.
        assert len(tour) == n
        self.tourfinal = tour
        self.m.write('sols/SolutionForID_'+ str(self.result_id) + '.sol')
# Drops final solution | |
required")
params = {
"type": cfr_part,
"fromDate": from_date,
"toDate": to_date,
"page": page,
}
return self._request("GetParts", params)
def get_all_manufacturers(
self, manufacturer_type: str = None, page: int = 1
) -> List[Dict[str, Any]]:
"""Return a list of vPIC manufacturers of the given manufacturer_type.
This provides a list of all the Manufacturers available in vPIC Dataset.
See ``get_vehicle_variable_values_list("Manufacturer Type")`` for the list
of manufacturer types.
Args:
manufacturer_type: The manufacturer type, which is Incomplete Vehicles,
Completed Vehicle Manufacturer, Incomplete Vehicle Manufacturer,
Intermediate Manufacturer, Final-Stage Manufacturer, Alterer,
Replica Vehicle Manufacturer. You can pass the full type name, or a
substring of the type.
page: results are paginated; this is the page number to return
Examples:
>>> get_all_manufacturers("Completed Vehicle", 1)
[
{
"Country": "UNITED STATES (USA)",
"Mfr_CommonName": "Tesla",
"Mfr_ID": 955,
"Mfr_Name": "TESLA, INC.",
"VehicleTypes": [
{
"IsPrimary": true,
"Name": "Passenger Car"
},
{
"IsPrimary": false,
"Name": "Multipurpose Passenger Vehicle (MPV)"
}
},
...
]
"""
params = {"ManufacturerType": manufacturer_type, "page": page}
return self._request("GetAllManufacturers", params)
def get_manufacturer_details(
self, manufacturer: Union[str, int]
) -> List[Dict[str, Any]]:
"""Returns details for one or more manufacturers.
Args:
manufacturer: Pass the Manufacturer Id (int) or the complete
manufacturer name (str) to return detail for a single
manufacturer. Pass a partial name to return manufacturers
with names that include the partial name.
Examples:
>>> get_manufacturer_details(988)
[
{
"Address": "1919 Torrance Blvd.",
"Address2": null,
"City": "Torrance",
"ContactEmail": "<EMAIL>",
"ContactFax": null,
"ContactPhone": "(310)783-3401",
"Country": "UNITED STATES (USA)",
"DBAs": "...",
"EquipmentItems": [],
"LastUpdated": "/Date(1618422117803-0400)/",
"ManufacturerTypes": [
{
"Name": "Completed Vehicle Manufacturer"
}
],
"Mfr_CommonName": "Honda",
"Mfr_ID": 988,
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC",
"OtherManufacturerDetails": null,
"PostalCode": "90501",
"PrimaryProduct": null,
"PrincipalFirstName": "<NAME>",
"PrincipalLastName": null,
"PrincipalPosition": "President & CEO",
"StateProvince": "CALIFORNIA",
"SubmittedName": "<NAME>",
"SubmittedOn": "/Date(1618286400000-0400)/",
"SubmittedPosition": "Sr. Specialist, II",
"VehicleTypes": [
{
"GVWRFrom": "Class 1A: 3,000 lb or less (1,360 kg or less)",
"GVWRTo": "Class 1D: 5,001 - 6,000 lb (2,268 - 2,722 kg)",
"IsPrimary": true,
"Name": "Passenger Car"
},
{
"GVWRFrom": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"GVWRTo": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"IsPrimary": false,
"Name": "Truck "
},
{
"GVWRFrom": "Class 1B: 3,001 - 4,000 lb (1,360 - 1,814 kg)",
"GVWRTo": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"IsPrimary": false,
"Name": "Multipurpose Passenger Vehicle (MPV)"
}
]
}
...
]
"""
if manufacturer is None:
raise ValueError("manufacturer is required")
return self._request(f"GetManufacturerDetails/{manufacturer}")
def get_makes_for_manufacturer(
self, manufacturer: Union[str, int], model_year: int = None
) -> List[Dict[str, Any]]:
"""Returns makes produced by a manufacturer or manufacturers.
Args:
manufacturer: Pass the Manufacturer Id (int) or the complete
manufacturer name (str) to return detail for a single manufacturer.
Pass a partial name to return manufacturers with names that include
the partial name.
model_year: Pass a model year to return only those makes made by
the manufacturer for that model year.
Raises:
ValueError: if ``manufacturer`` is missing
Examples:
>>> get_makes_for_manufacturer(988)
[
{
"MakeId": 474,
"MakeName": "HONDA",
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC"
},
{
"MakeId": 475,
"MakeName": "ACURA",
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC"
}
...
]
"""
if manufacturer is None:
raise ValueError("manufacturer is required")
if model_year:
results = self._request(
f"GetMakesForManufacturerAndYear/{manufacturer}", {"year": model_year}
)
else:
results = self._request(f"GetMakeForManufacturer/{manufacturer}")
return results
def get_makes_for_vehicle_type(self, vehicle_type: str) -> List[Dict[str, Any]]:
"""Returns makes that produce a vehicle_type
Args:
vehicle_type: A vPIC vehicle_type. For example, "Passenger Car",
"Truck", or "Multipurpose Passenger Vehicle (MPV)". If you pass
a partial vehicle_type, for example "Passenger", results will
include makes for all matching vehicle types. Matching is not
case sensitive.
Raises:
ValueError: if ``vehicle_type`` is missing
Examples:
>>> get_makes_for_vehicle_type('Car')
[
{
"MakeId": 440,
"MakeName": "<NAME>",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
...
]
"""
if vehicle_type is None:
raise ValueError("vehicle_type is required")
return self._request(f"GetMakesForVehicleType/{vehicle_type.rstrip()}")
def get_vehicle_types_for_make(self, make: Union[str, int]) -> List[Dict[str, Any]]:
"""Returns vehicle types produced by a make or make
Args:
make: Pass the MakeId (int) or the complete make name (str) to return
vehicle types for a single manufacturer. Pass a partial make name
to return vehicle types for all makes that match the partial name.
When you pass a make name, results will include the MakeId and
MakeName because you may get vehicle_types for more than one make.
Raises:
ValueError: if ``make`` is missing
Examples:
>>> get_vehicle_types_for_make(474)
[
{
"VehicleTypeId": 1,
"VehicleTypeName": "Motorcycle"
},
{
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"VehicleTypeId": 3,
"VehicleTypeName": "Truck "
},
{
"VehicleTypeId": 7,
"VehicleTypeName": "Multipurpose Passenger Vehicle (MPV)"
},
{
"VehicleTypeId": 9,
"VehicleTypeName": "Low Speed Vehicle (LSV)"
}
]
>>> get_vehicle_types_for_make('kia')
[
{
"MakeId": 499,
"MakeName": "KIA",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"MakeId": 499,
"MakeName": "KIA",
"VehicleTypeId": 7,
"VehicleTypeName": "Multipurpose Passenger Vehicle (MPV)"
},
{
"MakeId": 5848,
"MakeName": "MGS GRAND SPORT (MARDIKIAN)",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
}
]
"""
if make is None:
raise ValueError("make is required")
if isinstance(make, int):
return self._request(f"GetVehicleTypesForMakeId/{make}")
else:
return self._request(f"GetVehicleTypesForMake/{make}")
def get_equipment_plant_codes(
self, year: int, equipment_type: int, report_type: str = "All"
) -> List[Dict[str, Any]]:
"""Returns a list of plants that manufacture certain vehicle equipment.
Plants have a unique three-character U.S. Department of Transportation
(DOT) code. vPIC API documentation says this API only accepts 2016 and
later.
Args:
year: must be 2016 or later
equipment_type: return plants that manufacture one of these equipment
types: 1 = Tires; 3 = Brake Hoses; 13 = Glazing; 16 = Retread
report_type: must be one of
New = plants whose code was assigned during the selected year
Updated = plants whose data was modified during the selected year
Closed = plants that are no longer active
All = all active and closed plants, regardless of year
Raises:
ValueError: if ``year`` is earlier than 2016
Example:
>>> get_equipment_plant_codes(2016, 1)
[
{
"Address": "2950 INTERNATIONAL BLVD.",
"City": "CLARKSVILLE",
"Country": "USA",
"DOTCode": "00T",
"Name": "HANKOOK TIRE MANUFACTURING TENNESSEE, LP",
"OldDotCode": "",
"PostalCode": "37040",
"StateProvince": "TENNESSEE",
"Status": "Active"
},
...
]
"""
if year < 2016:
raise ValueError("Year must be 2016 or later")
params = {
"year": year,
"equipmentType": equipment_type,
"reportType": report_type,
}
return self._request("GetEquipmentPlantCodes", params)
def get_models_for_make(
self, make: Union[int, str], model_year: int = None, vehicle_type: str = None
) -> List[Dict[str, Any]]:
"""Return a list of models for a make or makes.
Optionally filter the results by model year and vehicle type.
Args:
make: Pass the MakeId (int) or the complete make name (str) to return
vehicle types for a single manufacturer. Pass a partial make name
to return vehicle types for all makes that match the partial name.
When you pass a make name, results will include the MakeId and
MakeName because you may get vehicle_types for more than one make.
model_year: pass this to return models made in this model year
vehicle_type: one of the vPIC vehicle_types (for example, "Passenger Car",
"Truck", or "Multipurpose Passenger Vehicle (MPV)")
Raises:
ValueError: if ``year`` is earlier than 2016
Examples:
>>> get_models_for_make("TESLA", model_year=2020)
[
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 1685,
"ModelName": "Model S"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 10199,
"ModelName": "Model X"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 17834,
"ModelName": "Model 3"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 27027,
"ModelName": "Model Y"
}
]
VehicleTypeId and VehicleTypeName are only returned
when you specify vehicle_type.
"""
if make is None:
raise ValueError("make is required")
if model_year or vehicle_type:
my = f"/modelyear/{model_year}" if model_year else ""
vt = f"/vehicletype/{vehicle_type}" if vehicle_type else ""
if isinstance(make, int):
endpoint = f"GetModelsForMakeIdYear/makeId/{make}{my}{vt}"
else:
endpoint = f"GetModelsForMakeYear/make/{make}{my}{vt}"
else:
if isinstance(make, int):
endpoint = f"GetModelsForMakeId/{make}"
else:
endpoint = f"GetModelsForMake/{make}"
return self._request(endpoint)
def get_vehicle_variable_list(self) -> List[Dict[str, Any]]:
"""Return a list of vehicle variables tracked by vPIC
Examples:
>>> get_vehicle_variable_list()
[
{
"DataType": "string",
"Description": "<p>Any other battery information that does...",
"Id": 1,
"Name": "Other Battery Info"
},
{
"DataType": "lookup",
"Description": "<p>Battery | |
= False
self.sub_task.suspend_time = 0
self.sub_task.suspend_remain_time = 0
self.updateTask()
if self.process is None:
return
try:
if is_debug():
p = psutil.Process(pid=self.process.pid)
p.resume()
return
pos_process = self.get_pos_process()
if pos_process:
pos_process.resume()
except:
pass
@property
def priority(self):
if self.process is None:
return psutil.NORMAL_PRIORITY_CLASS
try:
pos_process = self.get_pos_process()
if pos_process:
return pos_process.nice()
except Exception as e:
pass
return psutil.NORMAL_PRIORITY_CLASS
@priority.setter
def priority(self, prio):
if self.process is None:
return
try:
pos_process = self.get_pos_process()
if pos_process:
pos_process.nice(prio)
# pos_process.ionice(psutil.IOPRIO_HIGH)
return
except Exception as e:
pass
    def build_args(self):
        """Build the plotter command line for this task.

        Three plotter flavours are supported: the built-in plotter, the
        official chia binary and mad-max chia_plot; each has its own flag
        layout. In debug mode a dummy test executable is returned instead.
        """
        if is_debug():
            # Debug: run a fake plotter that just replays a log file.
            cmdline = os.path.join(BASE_DIR, 'bin', 'windows', 'plotter', 'test.exe')
            return [cmdline, 'logs.txt', '500', '10000']
        t = self.task
        plot_id, plot_memo = get_plot_id_and_memo(t.fpk, t.ppk, t.nft)
        # Fall back to the first temp folder when no second one is configured.
        temp2_folder = t.temporary2_folder
        if not temp2_folder:
            temp2_folder = t.temporary_folder
        cmdline = t.cmdline
        args = []
        if t.plotter_type == PLOTTER_BUILTIN:
            # Built-in plotter: the plot id/memo are precomputed and the final
            # file name is fixed up-front.
            # NOTE(review): plot_filename is only used for the -f flag here;
            # self.plot_filename (read later by run()) appears to be set
            # elsewhere — confirm.
            dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
            plot_filename: str = f"plot-k{t.k}-{dt_string}-{plot_id}.plot"
            args = [
                cmdline,
                'create',
                '-i', '0x' + plot_id,
                '-m', '0x' + plot_memo,
                '-k', f'{t.k}',
                '-f', plot_filename,
                '-r', f'{t.number_of_thread}',
                '-u', f'{t.buckets}',
                '-s', '65536',
                '-t', t.temporary_folder,
                '-2', temp2_folder,
                '-b', f'{t.memory_size}',
                '-p',
            ]
            if t.nobitfield:
                args.append('-e')
        elif t.plotter_type == PLOTTER_OFFICIAL:
            # Official chia CLI: pass raw (un-prefixed) keys and let the
            # binary derive id/memo itself.
            fpk = t.fpk
            ppk = t.ppk
            nft = t.nft
            if fpk.startswith('0x'):
                fpk = fpk[2:]
            if ppk.startswith('0x'):
                ppk = ppk[2:]
            args = [
                cmdline,
                'plots',
                'create',
                # '-i', plot_id,
                '-f', fpk,
                # '-p', ppk,
                # '-m', plot_memo,
                '-k', f'{t.k}',
                '-r', f'{t.number_of_thread}',
                '-u', f'{t.buckets}',
                '-t', t.temporary_folder,
                '-2', temp2_folder,
                '-b', f'{t.memory_size}',
            ]
            if t.nobitfield:
                args.append('-e')
            # NFT (pool contract) plots use -c; OG plots use the pool key -p.
            if nft:
                args += ['-c', nft]
            else:
                args += ['-p', ppk]
            args.append('-x')
        elif t.plotter_type == PLOTTER_CHIA_PLOT:
            # mad-max chia_plot: folders must end with a path separator.
            fpk = t.fpk
            ppk = t.ppk
            nft = t.nft
            if fpk.startswith('0x'):
                fpk = fpk[2:]
            if ppk.startswith('0x'):
                ppk = ppk[2:]
            args = [
                cmdline,
                '-r', f'{t.number_of_thread}',
                '-u', f'{t.buckets}',
                '-t', t.temporary_folder + '/',
                '-2', temp2_folder + '/',
                '-f', fpk,
            ]
            if nft:
                args += ['-c', nft]
            else:
                args += ['-p', ppk]
        return args
    def run(self):
        """Worker main loop: wait for a slot, pick a destination, run the
        plotter process and record the sub-task outcome.

        The outer ``while True`` retries scheduling every second until the
        sub-task either runs to completion, fails, or is stopped; every exit
        path goes through ``break``.
        """
        t = self.task
        args = self.build_args()
        config = get_config()
        while True:
            delay_remain = self.task.delay_remain()
            # Manual stop requested before the process even started: cancel
            # this sub-task and mark all remaining ones as stopped.
            if self.stopping:
                self.stopping = False
                self.sub_task.status = '已取消'
                self.sub_task.finish = True
                self.sub_task.success = False
                self.sub_task.end_time = datetime.now()
                for i in range(self.task.current_task_index + 1, len(self.task.sub_tasks)):
                    rest_sub_task = self.task.sub_tasks[i]
                    rest_sub_task.success = False
                    rest_sub_task.status = '已手动停止'
                    rest_sub_task.finish = True
                    self.updateTask(sub_task=rest_sub_task)
                else:
                    # for-else: the loop body has no break, so this always runs
                    # after the loop (including when the range is empty).
                    self.updateTask()
                break
            if delay_remain:
                time.sleep(1)
                continue
            self.task.running = False
            # Queue until the manager grants this task a running slot.
            if not PlotTaskManager.assign_task(self.task):
                self.sub_task.status = '排队中'
                time.sleep(1)
                continue
            hdd_folders = HDDFolders()
            if self.task.auto_hdd_folder:
                # Automatic destination selection, optionally deleting old
                # plots to make room when the config allows it.
                available_hdd_folder = PlotTaskManager.choise_available_hdd_folder(self.sub_task.k,
                                                                                  self.task.is_new_plot, self)
                if self.task.is_new_plot and not available_hdd_folder and 'auto_delete_old_plot' in config and \
                        config['auto_delete_old_plot']:
                    self.task.able_to_next, available_hdd_folder = PlotTaskManager.free_space_for_plot_auto(
                        need_size=get_k_size(self.task.k), except_worker=self)
                    if not self.task.able_to_next:
                        self.sub_task.end_time = datetime.now()
                        for i in range(self.task.current_task_index, len(self.task.sub_tasks)):
                            rest_sub_task = self.task.sub_tasks[i]
                            rest_sub_task.success = False
                            rest_sub_task.status = '删除旧图失败'
                            rest_sub_task.finish = True
                            self.updateTask(sub_task=rest_sub_task)
                        break
                elif not available_hdd_folder:
                    # No drive can take another plot: fail the remaining queue.
                    self.sub_task.end_time = datetime.now()
                    for i in range(self.task.current_task_index, len(self.task.sub_tasks)):
                        rest_sub_task = self.task.sub_tasks[i]
                        rest_sub_task.success = False
                        rest_sub_task.status = '无可用硬盘'
                        rest_sub_task.finish = True
                        self.updateTask(sub_task=rest_sub_task)
                    break
                self.sub_task.hdd_folder = available_hdd_folder
            elif not self.task.able_to_next:
                # Fixed destination that is (or will be) full: try to free
                # space on its drive by deleting old plots, if allowed.
                driver = os.path.splitdrive(self.task.hdd_folder)[0]
                need_size = PlotTaskManager.get_driver_running_size(driver, self)
                need_size += get_k_size(self.task.k)
                if self.task.is_new_plot and 'auto_delete_old_plot' in config and config['auto_delete_old_plot'] and \
                        hdd_folders.is_driver_have_old_plot(driver, need_size=need_size):
                    self.task.able_to_next, _ = hdd_folders.free_space_for_plot_in_driver(driver, need_size=need_size)
                    if not self.task.able_to_next:
                        self.sub_task.end_time = datetime.now()
                        for i in range(self.task.current_task_index, len(self.task.sub_tasks)):
                            rest_sub_task = self.task.sub_tasks[i]
                            rest_sub_task.success = False
                            rest_sub_task.status = '删除旧图失败'
                            rest_sub_task.finish = True
                            self.updateTask(sub_task=rest_sub_task)
                        break
                else:
                    self.sub_task.end_time = datetime.now()
                    for i in range(self.task.current_task_index, len(self.task.sub_tasks)):
                        rest_sub_task = self.task.sub_tasks[i]
                        rest_sub_task.success = False
                        rest_sub_task.status = '硬盘已满'
                        rest_sub_task.finish = True
                        self.updateTask(sub_task=rest_sub_task)
                    break
            # Launch the plotter with the chosen destination folder.
            args.append('-d')
            args.append(self.sub_task.hdd_folder + '/')
            self.sub_task.begin_time = datetime.now()
            self.sub_task.status = '正在执行'
            self.sub_task.progress = 0
            self.updateTask()
            exe_cwd = os.path.dirname(t.cmdline)
            self.process = Popen(args, stdout=PIPE, stderr=PIPE, cwd=exe_cwd, creationflags=CREATE_NO_WINDOW)
            self.priority = self.task.priority
            success = True
            finished = False
            # Pump the plotter's stdout until the process exits, feeding each
            # line to the log parser to track progress/failure.
            while True:
                line = self.process.stdout.readline()
                if not line and self.process.poll() is not None:
                    break
                orig_text = line.decode('utf-8', errors='replace')
                text = orig_text.rstrip()
                if text:
                    self.sub_task.log.append(orig_text)
                    _failed, _finished = self.handleLog(text)
                    if _failed:
                        success = False
                    if _finished:
                        finished = True
                    self.signalTaskOutput.emit(self.task, self.sub_task, text)
                    self.updateTask()
            self.process = None
            self.task.running = False
            # Classify the outcome of the finished process.
            failed = False
            plot_path = os.path.join(self.sub_task.hdd_folder, self.plot_filename)
            if self.stopping:
                self.stopping = False
                failed = True
                self.sub_task.status = '已手动停止'
            elif not success or not finished:
                failed = True
                if self.sub_task.log and 'bad allocation' in self.sub_task.log[-1]:
                    self.sub_task.status = '内存不足'
                else:
                    self.sub_task.status = '失败'
            elif not self.plot_filename:
                failed = True
                self.sub_task.status = '没有plot文件名'
            elif not os.path.exists(plot_path) and not is_debug():
                failed = True
                self.sub_task.status = 'plot文件不存在'
            else:
                # Success: record the finished plot and notify listeners.
                self.sub_task.status = '完成'
                self.sub_task.finish = True
                self.sub_task.success = True
                self.sub_task.progress = 100.0
                self.sub_task.suspended_seconds = 0
                self.sub_task.end_time = datetime.now()
                self.sub_task.plot_file = plot_path
                if not self.task.able_to_next:
                    self.sub_task.status += '(硬盘已满)'
                self.task.signalNewPlot.emit(self.task, self.sub_task)
                self.updateTask()
                break
            self.updateTask()
            if failed:
                # On failure, propagate the status to the rest of the queue
                # when this task has a fixed sub-task count.
                self.sub_task.end_time = datetime.now()
                if self.task.specify_count:
                    if self.task.current_sub_task == self.sub_task:
                        for i in range(self.task.current_task_index, len(self.task.sub_tasks)):
                            rest_sub_task = self.task.sub_tasks[i]
                            rest_sub_task.success = False
                            rest_sub_task.status = self.sub_task.status
                            rest_sub_task.finish = True
                            self.updateTask(sub_task=rest_sub_task)
                        self.task.current_task_index = len(self.task.sub_tasks) - 1
                    else:
                        self.sub_task.success = False
                        self.sub_task.finish = True
                        self.updateTask(sub_task=self.sub_task)
                else:
                    self.sub_task.success = False
                    self.sub_task.finish = True
                    self.updateTask(sub_task=self.sub_task)
                self.updateTask()
                break
def updateTask(self, task=None, sub_task=None):
if task is None:
task = self.task
if sub_task is None:
sub_task = self.sub_task
self.task.signalUpdateTask.emit(task, sub_task)
class PlotTaskManager(QObject):
    """Owns the list of plot tasks: persistence, scheduling and signal fan-out."""

    # Re-emitted per-task signals; all carry (task, sub_task).
    signalUpdateTask = pyqtSignal(object, object)
    signalBeforeMakingPlot = pyqtSignal(object, object)
    signalMakingPlot = pyqtSignal(object, object)
    signalNewPlot = pyqtSignal(object, object)
    signalNewSubTask = pyqtSignal(object, object)
    signalSubTaskDone = pyqtSignal(object, object)
    # Class-level shared state; guarded by task_lock (reader/writer lock).
    tasks = []
    task_lock = RWlock()
    tasks_to_run = []
    pending_tasks = []
    def __init__(self):
        """Load persisted tasks and start the 1-second scheduling timer."""
        super(PlotTaskManager, self).__init__()
        self.load_tasks()
        self.startTimer(1000)  # timerEvent fires every second to drive the queue
@property
def working(self):
PlotTaskManager.task_lock.read_acquire()
for task in PlotTaskManager.tasks:
if task.working:
PlotTaskManager.task_lock.read_release()
return True
PlotTaskManager.task_lock.read_release()
return False
@staticmethod
def get_all_running_hdd_folders(except_worker=None):
running_folders = []
PlotTaskManager.task_lock.read_acquire()
for task in PlotTaskManager.tasks:
for sub in task.sub_tasks:
if sub.working:
if except_worker and except_worker == sub.worker:
continue
running_folders.append((sub.k, sub.hdd_folder))
PlotTaskManager.task_lock.read_release()
return running_folders
@staticmethod
def get_driver_running_size(driver, except_worker=None):
running_folders = PlotTaskManager.get_all_running_hdd_folders(except_worker)
running_size = 0
for running_object in running_folders:
running_k = running_object[0]
running_folder = running_object[1]
running_driver = os.path.splitdrive(running_folder)[0]
if driver == running_driver:
running_size += get_k_size(running_k)
return running_size
    @staticmethod
    def free_space_for_plot_auto(need_size, except_worker=None):
        """Free *need_size* bytes by deleting old plots on some eligible drive.

        Drives are tried in random order; the first drive with both old- and
        new-plot folders and enough deletable old plots wins.

        Returns:
            (True, new_plot_folder) on success, (False, '') otherwise.
        """
        hdd_folders = HDDFolders()
        drivers = hdd_folders.get_drivers()
        # Randomize so repeated calls spread deletions across drives.
        random.shuffle(drivers)
        for driver in drivers:
            if not hdd_folders.is_driver_have_old_and_new_folders(driver):
                continue
            # Account for plots other workers are already writing to the drive.
            running_size = PlotTaskManager.get_driver_running_size(driver, except_worker)
            if not hdd_folders.is_driver_have_old_plot(driver, need_size=running_size + need_size):
                continue
            success, _ = hdd_folders.free_space_for_plot_in_driver(driver, running_size + need_size)
            if success:
                return success, hdd_folders.get_driver_new_folder(driver)
        return False, ''
@staticmethod
def have_free_space_for_plot_auto(need_size, except_worker=None):
hdd_folders = HDDFolders()
drivers = hdd_folders.get_drivers()
for driver in drivers:
if not hdd_folders.is_driver_have_old_and_new_folders(driver):
continue
running_size = PlotTaskManager.get_driver_running_size(driver, except_worker)
if not hdd_folders.is_driver_have_old_plot(driver, need_size=running_size + need_size):
continue
return True
return False
@staticmethod
def is_task_able_to_next(task: PlotTask, except_worker=None):
# if is_debug():
# return False
running_folders = PlotTaskManager.get_all_running_hdd_folders(except_worker)
folder = task.hdd_folder
usage = get_disk_usage(folder)
if usage is None:
return False
free = usage.free
for running_object in running_folders:
running_k = running_object[0]
running_folder = running_object[1]
if running_folder == folder:
free -= get_k_size(running_k)
return free > get_k_size(task.k)
    @staticmethod
    def choise_available_hdd_folder(k, new_plot, except_worker=None):
        """Pick a random configured destination folder with room for a k plot.

        NOTE: the name keeps its historical spelling ('choise') because
        callers elsewhere use it.

        Args:
            k: plot k-size used to compute the required free space.
            new_plot: only folders flagged with the same 'new_plot' value in
                the config are considered.
            except_worker: worker whose own in-flight plot is ignored when
                computing claimed space.

        Returns:
            A folder path, or '' when no folder qualifies.
        """
        # if is_debug():
        #     return ''
        running_folders = PlotTaskManager.get_all_running_hdd_folders(except_worker)
        available_folders = []
        config = get_config()
        for hdd_folder_obj in config['hdd_folders']:
            folder = hdd_folder_obj['folder']
            if new_plot != hdd_folder_obj['new_plot']:
                continue
            if not os.path.exists(folder):
                continue
            usage = get_disk_usage(folder)
            if usage is None:
                continue
            # Subtract space already claimed by plots being written here.
            free = usage.free
            for running_object in running_folders:
                running_k = running_object[0]
                running_folder = running_object[1]
                if running_folder == folder:
                    free -= get_k_size(running_k)
            if is_debug():
                # Debug builds accept every folder regardless of free space.
                available_folders.append(folder)
                continue
            if free > get_k_size(k):
                available_folders.append(folder)
        if len(available_folders) == 0:
            return ''
        # Randomize to spread plots across eligible folders.
        return random.choice(available_folders)
def connect_task(self, task: PlotTask):
task.signalUpdateTask.connect(self.signalUpdateTask)
task.signalBeforeMakingPlot.connect(self.signalBeforeMakingPlot)
task.signalMakingPlot.connect(self.signalMakingPlot)
task.signalNewPlot.connect(self.signalNewPlot)
task.signalNewSubTask.connect(self.signalNewSubTask)
task.signalSubTaskDone.connect(self.signalSubTaskDone)
def add_task(self, task: PlotTask):
self.connect_task(task)
PlotTaskManager.task_lock.write_acquire()
PlotTaskManager.tasks.append(task)
PlotTaskManager.task_lock.write_release()
PlotTaskManager.save_tasks()
def remove_task(self, task: PlotTask):
PlotTaskManager.task_lock.write_acquire()
PlotTaskManager.tasks.remove(task)
if task in PlotTaskManager.pending_tasks:
PlotTaskManager.pending_tasks.remove(task)
if task in PlotTaskManager.tasks_to_run:
PlotTaskManager.tasks_to_run.remove(task)
PlotTaskManager.task_lock.write_release()
PlotTaskManager.save_tasks()
    def load_tasks(self):
        """Restore the task list from tasks.pkl and repair interrupted state.

        Sub-tasks that were still unfinished when the app last exited are
        marked as abnormally terminated, and the repaired list is saved back.
        """
        PlotTaskManager.task_lock.write_acquire()
        PlotTaskManager.tasks = []
        try:
            filename = os.path.join(BASE_DIR, 'tasks.pkl')
            if os.path.exists(filename):
                task_data = open(filename, 'rb').read()
                PlotTaskManager.tasks = pickle.loads(task_data)
        except:
            # Deliberate best-effort load: a corrupt/unreadable pickle just
            # means starting with an empty task list.
            pass
        changed = False
        for task in PlotTaskManager.tasks:
            self.connect_task(task)
            not_finish = False
            for sub_task in task.sub_tasks:
                if not sub_task.finish:
                    # Process died mid-plot last session: mark it terminated.
                    sub_task.status = '异常结束'
                    sub_task.end_time = datetime.now()
                    sub_task.finish = True
                    changed = True
                    not_finish = True
            if not_finish:
                # Skip the task to its last sub-task so it does not resume.
                task.current_task_index = len(task.sub_tasks) - 1
        PlotTaskManager.task_lock.write_release()
        if changed:
            PlotTaskManager.save_tasks()
@staticmethod
def save_tasks():
filename_tmp = os.path.join(BASE_DIR, 'tasks.pkl.tmp')
filename = os.path.join(BASE_DIR, 'tasks.pkl')
PlotTaskManager.task_lock.read_acquire()
tasks_data = pickle.dumps(PlotTaskManager.tasks)
PlotTaskManager.task_lock.read_release()
try:
open(filename_tmp, 'wb').write(tasks_data)
if os.path.exists(filename):
os.remove(filename)
os.rename(filename_tmp, filename)
except Exception as e:
pass
return
@staticmethod
def get_tasks_count_info(lock=True):
if lock:
PlotTaskManager.task_lock.read_acquire()
config = get_config()
next_when_fully_complete = 'next_when_fully_complete' in config and config['next_when_fully_complete']
total_count = 0
phase1_count = 0
for _task in PlotTaskManager.tasks:
if _task.current_sub_task.working:
if _task.current_sub_task.copying and not next_when_fully_complete:
continue
total_count += 1
if _task.phase == 1:
phase1_count += 1
if lock:
PlotTaskManager.task_lock.read_release()
return total_count, phase1_count
    def timerEvent(self, QTimerEvent):
        # Qt timer tick (every 1000 ms, started in __init__): advance the task
        # queue. process_queue is defined elsewhere in this class.
        PlotTaskManager.process_queue()
| |
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()  # dimensionless sentinel used when an operand has no units
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}  # exponent sign applied by mul/div
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
    """Return True if *obj* supports ``len()``, False otherwise."""
    try:
        len(obj)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        return False
    return True
def return_arr(func):
    """Decorator for functions returning ``(ndarray, units)``: wrap 0-d
    results as YTQuantity and everything else as the first argument's type."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        ret, units = func(*args, **kwargs)
        if ret.shape == ():
            # 0-d result: hand back a scalar quantity.
            return YTQuantity(ret, units)
        else:
            # This could be a subclass, so don't call YTArray directly.
            return type(args[0])(ret, units)
    return wrapped
# Per-ufunc unit resolvers: each maps operand unit(s) to the unit of the
# ufunc's result. The pure unit-algebra ones are lru_cached since Unit
# arithmetic is comparatively expensive.
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
    return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
    return unit1 * unit2
def preserve_units(unit1, unit2=None):
    # Result keeps the first operand's units.
    return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
    return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
    return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
    return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
    return unit**-1
def passthrough_unit(unit, unit2=None):
    # Result keeps the input's units unchanged.
    return unit
def return_without_unit(unit, unit2=None):
    # Result carries no units (plain ndarray).
    return None
def arctan2_unit(unit1, unit2):
    # The result of arctan2 is an angle; report it as dimensionless here.
    return NULL_UNIT
def comparison_unit(unit1, unit2=None):
    # Comparison results are plain booleans with no units.
    return None
def invert_units(unit):
    raise TypeError(
        "Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
    raise TypeError(
        "Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
    """Extract ``(out_arr, input, units)`` for a unary ufunc application.

    Inputs without units are treated as dimensionless. Angle-dimensioned
    inputs to trigonometric ufuncs are converted to radians first, in which
    case the ufunc is re-applied to the converted data.
    """
    inp = inputs[0]
    u = getattr(inp, 'units', None)
    if u is None:
        u = NULL_UNIT
    if u.dimensions is angle and ufunc in trigonometric_operators:
        # Trig ufuncs operate on radians; .v strips to a plain ndarray.
        inp = inp.in_units('radian').v
    if out_arr is not None:
        # Recompute on the (possibly converted) input, as a bare ndarray view.
        out_arr = ufunc(inp).view(np.ndarray)
    return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
    """Extract ``((inp1, inp2), (unit1, unit2), ret_class)`` for a binary ufunc.

    Missing units are replaced with dimensionless units sharing the other
    operand's registry. For ``power`` the second "unit" slot carries the
    exponent itself, which must be dimensionless when it is an array.
    """
    inp1 = coerce_iterable_units(inputs[0])
    inp2 = coerce_iterable_units(inputs[1])
    unit1 = getattr(inp1, 'units', None)
    unit2 = getattr(inp2, 'units', None)
    ret_class = get_binary_op_return_class(type(inp1), type(inp2))
    if unit1 is None:
        unit1 = Unit(registry=getattr(unit2, 'registry', None))
    if unit2 is None and ufunc is not power:
        unit2 = Unit(registry=getattr(unit1, 'registry', None))
    elif ufunc is power:
        # For power, "unit2" is really the exponent value.
        unit2 = inp2
        if isinstance(unit2, np.ndarray):
            if isinstance(unit2, YTArray):
                if unit2.units.is_dimensionless:
                    pass
                else:
                    # Raising to a dimensional power is meaningless.
                    raise YTUnitOperationError(ufunc, unit1, unit2)
            # Array exponent: treat as the neutral exponent for unit algebra.
            unit2 = 1.0
    return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
    """Reconcile operand units for unit-preserving ufuncs (e.g. add/subtract).

    All-zero operands adopt the other operand's units; otherwise the second
    operand is converted to the first's units, and mismatched dimensions
    raise YTUnitOperationError.
    """
    if units[0] != units[1]:
        # np.any(...) is False only for an all-zero operand, whose units are
        # irrelevant and can simply be replaced.
        any_nonzero = [np.any(inps[0]), np.any(inps[1])]
        if any_nonzero[0] == np.bool_(False):
            units = (units[1], units[1])
        elif any_nonzero[1] == np.bool_(False):
            units = (units[0], units[0])
        else:
            if not units[0].same_dimensions_as(units[1]):
                raise YTUnitOperationError(ufunc, *units)
            # Same dimensions, different units: convert operand 2 to match.
            inps = (inps[0], ret_class(inps[1]).to(
                ret_class(inps[0]).units))
    return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
    """Reconcile operand units for comparison ufuncs (equal, less, ...).

    Identically-zero operands adopt the other operand's units.  When both
    operands are unitful, the dimensions must match and the second operand
    is converted to the first operand's units; with ``raise_error=True``
    the mismatch raises YTUfuncUnitError instead of being coerced.
    """
    if units[0] != units[1]:
        u1d = units[0].is_dimensionless
        u2d = units[1].is_dimensionless
        any_nonzero = [np.any(inps[0]), np.any(inps[1])]
        if any_nonzero[0] == np.bool_(False):
            # Comparing with all-zero data is unit-agnostic.
            units = (units[1], units[1])
        elif any_nonzero[1] == np.bool_(False):
            units = (units[0], units[0])
        elif not any([u1d, u2d]):
            if not units[0].same_dimensions_as(units[1]):
                raise YTUnitOperationError(ufunc, *units)
            else:
                if raise_error:
                    raise YTUfuncUnitError(ufunc, *units)
                # Convert operand 2 so raw values compare correctly.
                inps = (inps[0], ret_class(inps[1]).to(
                    ret_class(inps[0]).units))
    return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
    """Fold a scaled dimensionless result unit into the array data.

    Multiplying/dividing quantities whose units cancel (e.g. mm/m) yields
    a dimensionless unit whose ``base_value`` is not 1.  In that case the
    data are multiplied by the base value and the unit is replaced by a
    plain dimensionless unit from the same registry.
    """
    if unit.is_dimensionless and unit.base_value != 1.0:
        if not units[0].is_dimensionless:
            if units[0].dimensions == units[1].dimensions:
                out_arr = np.multiply(out_arr.view(np.ndarray),
                                      unit.base_value, out=out)
                # Fresh dimensionless unit with base_value == 1.
                unit = Unit(registry=unit.registry)
    return out, out_arr, unit
def coerce_iterable_units(input_object):
    """Promote an iterable containing YTArrays to a single YTArray.

    ndarrays pass through untouched.  A non-ndarray iterable whose members
    include YTArrays must carry uniform units and is copied into a new
    YTArray; anything else is returned unchanged.
    """
    if isinstance(input_object, np.ndarray):
        return input_object
    if not iterable(input_object):
        return input_object
    if any(isinstance(member, YTArray) for member in input_object):
        first_units = getattr(input_object[0], 'units', NULL_UNIT)
        if any(getattr(member, 'units', NULL_UNIT) != first_units
               for member in input_object):
            raise YTIterableUnitCoercionError(input_object)
        # This will create a copy of the data in the iterable.
        return YTArray(input_object)
    return input_object
def sanitize_units_mul(this_object, other_object):
    """Prepare *other_object* for multiplication with *this_object*.

    If the other object is a YTArray with the same dimensions as the object
    under consideration, it is converted to the units of *this_object* so
    we don't mix different units with the same dimensions.
    """
    inp = coerce_iterable_units(this_object)
    ret = coerce_iterable_units(other_object)
    if isinstance(ret, YTArray):
        if inp.units.same_dimensions_as(ret.units):
            # BUG FIX: in_units() returns a converted copy rather than
            # converting in place; the previous code discarded the result,
            # so no conversion ever took place.
            ret = ret.in_units(inp.units)
    return ret
def sanitize_units_add(this_object, other_object, op_string):
    """Validate and convert *other_object* for addition or subtraction.

    Returns the second operand converted to the first operand's units.
    Adding zero (scalar or all-zero array) is always allowed regardless of
    units; any other dimension mismatch raises YTUnitOperationError with
    *op_string* naming the offending operation.
    """
    inp = coerce_iterable_units(this_object)
    ret = coerce_iterable_units(other_object)
    # Make sure the other object is a YTArray before we use the `units`
    # attribute.
    if isinstance(ret, YTArray):
        if not inp.units.same_dimensions_as(ret.units):
            # handle special case of adding or subtracting with zero or
            # array filled with zero
            if not np.any(other_object):
                return ret.view(np.ndarray)
            elif not np.any(this_object):
                return ret
            raise YTUnitOperationError(op_string, inp.units, ret.units)
        ret = ret.in_units(inp.units)
    else:
        # If the other object is not a YTArray, then one of the arrays must be
        # dimensionless or filled with zeros
        if not inp.units.is_dimensionless and np.any(ret):
            raise YTUnitOperationError(op_string, inp.units, dimensionless)
    return ret
def validate_comparison_units(this, other, op_string):
    """Convert *other* into *this*'s units ahead of a rich comparison.

    Objects without a ``units`` attribute are returned unchanged.  Units
    sharing the same symbolic expression and base value need no
    conversion; mismatched dimensions raise YTUnitOperationError.
    """
    # Check that other is a YTArray.
    if hasattr(other, 'units'):
        if this.units.expr is other.units.expr:
            # Same symbolic expression and same base value: effectively the
            # identical unit, so the raw values compare directly.
            if this.units.base_value == other.units.base_value:
                return other
        if not this.units.same_dimensions_as(other.units):
            raise YTUnitOperationError(op_string, this.units, other.units)
        return other.in_units(this.units)
    return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
    """
    Takes a Unit object, or string of known unit symbol, and check that it
    is compatible with this quantity. Returns Unit object.

    Raises YTEquivalentDimsError when the two units are only related via a
    dimensional equivalence (looked up in `em_dimensions` -- presumably the
    electromagnetic CGS/SI mapping; verify against the units package), and
    YTUnitConversionError when the dimensions simply do not match.
    """
    # let Unit() handle units arg if it's not already a Unit obj.
    if not isinstance(other_units, Unit):
        other_units = Unit(other_units, registry=my_units.registry)
    equiv_dims = em_dimensions.get(my_units.dimensions, None)
    if equiv_dims == other_units.dimensions:
        # Report which base system the equivalence belongs to, keyed on
        # whether the MKS current dimension appears in the expression.
        if current_mks in equiv_dims.free_symbols:
            base = "SI"
        else:
            base = "CGS"
        raise YTEquivalentDimsError(my_units, other_units, base)
    if not my_units.same_dimensions_as(other_units):
        raise YTUnitConversionError(
            my_units, my_units.dimensions, other_units, other_units.dimensions)
    return other_units
# Unary ufuncs for which YTArray defines a unit-propagation rule.
unary_operators = (
    negative, absolute, rint, sign, conj, exp, exp2, log, log2,
    log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
    arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
    rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
    signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
# Binary ufuncs with unit-handling rules (arithmetic, comparisons, bit ops).
binary_operators = (
    add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
    remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
    left_shift, right_shift, greater, greater_equal, less, less_equal,
    not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
    fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
# Trig ufuncs whose angle-valued inputs are first converted to radians
# (see get_inp_u_unary).
trigonometric_operators = (
    sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
bypass_validation : boolean
If True, all input validation is skipped. Using this option may produce
corrupted, invalid units or array data, but can lead to significant
speedups in the input validation logic adds significant overhead. If set,
input_units *must* be a valid unit object. Defaults to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent | |
considered slices. In each slice, the currently tracked MOTS is found
after which we move on to the next slice. The MOTS found in the
previous slice is then prepared as reference shape and initial guess
for the current slice.
The number of slices we advance in each step is `initial_stride` until
a MOTS is not found. In this case, the stride is reduced and the
smaller step is retried.
Note that if the tracker is run multiple times with the same output
configuration (same folders and ``hname``), then any curves found
previously will be loaded instead of being re-computed. Effectively, a
second run will then only take seconds when the first one finished at
some point (even if this means it diverged at a particular slice).
In case the tracker is running in *property* mode (i.e. with
``compute=False`` and ``props != None``), the same exact logic is
applied, the difference being that we error out (or wait) if no
previous curve is found.
"""
self._init_tracking()
curves = []
try_no = 1 if self.initial_stride > 1 else None
if self.initial_try_count is not None:
try_no = self.initial_try_count
stride = self.initial_stride
i = 0
while True:
if i >= len(self.files):
break
c, fname = self._step(i, try_no=try_no, stride=stride)
converged = c and c.user_data.get('converged', True)
if i and stride > 1 and not converged:
i -= stride # back to previous successful slice
stride = int(stride/2)
i += stride # advance to closer next slice
try_no += 1
self._c_ref = self._c_ref_prev
continue
if not c:
break
curves.append([c, fname])
if not converged:
# A curve was produced (and we computed its properties), but it
# "formally" did not converge. This may be due to insufficient
# maximum resolution, in which case it might still be a good
# approximation of the solution. Another reason could be that the
# step limit was reached. In this case the curve could be
# completely off. Anyway, we still saved it to allow inspection.
# The user can determine whether the result converged by checking
# the values of `user_data['converged']` and `user_data['reason']`.
self._p("Previous surface did not converge. Reason: %s"
% c.user_data.get('reason', 'unspecified'))
self._p("Stopping analysis here.")
break
i += stride
if self.full_output:
return curves
    def _step(self, i, try_no, stride):
        r"""Find the MOTS in the i'th slice.
        @param i
            Slice file index.
        @param try_no
            How many times we reduced the stride length already. This changes
            the file name, since a first try at a larger stride length which
            failed may now succeed with smaller steps.
        @param stride
            Current stride length (number of files to advance). This is *not*
            the number of iterations to advance if the stored simulation files
            do not contain every slice.
        @return
            Tuple ``(curve, fname)`` as produced by _do_step().
        """
        g = self.get_metric(i)
        try:
            return self._do_step(g, i, try_no, stride)
        finally:
            # Release slice data even if the search raised, so memory stays
            # bounded while stepping through many slices.
            g.unload_data()
def _copy_curve(self, c):
r"""Helper to copy a curve.
If this curve is `None` (i.e. not found/given) or a number (e.g. for
an initial guess of a round sphere), then no copying is required.
"""
if c is None:
return c
if isinstance(c, numbers.Number):
return c
return c.copy()
    def _do_step(self, g, i, try_no, stride):
        r"""Implement _step().

        @param g
            Metric of the current slice (data is loaded/unloaded by the
            caller).
        @param i
            Slice file index, used here for progress output.
        @param try_no
            Current retry count; changes the generated file name.
        @param stride
            Current stride length (progress output only).
        @return
            Tuple ``(curve, fname)``; both `None` when no MOTS was found.
        """
        # Keep a copy of the reference curve so the caller can restore it
        # when this step fails and is retried with a smaller stride.
        self._c_ref_prev = self._copy_curve(self._c_ref)
        self._p("=" * 72)
        self._p("Data file %d/%d: %s" % (i+1, len(self.files), self.files[i]))
        self._p(" %s" % time.strftime('%Y-%m-%d %H:%M:%S %z'))
        if self.initial_stride > 1:
            self._p(" evolution time: %s, iteration: %s, try: %s, stride: %d"
                    % (g.time, g.iteration, try_no, stride))
        else:
            self._p(" evolution time: %s, iteration: %s"
                    % (g.time, g.iteration))
        cfg = self._get_cfg(g, try_no)
        c, fname, cfg = self._load_existing_curve(cfg)
        if not c and fname:
            # curve was computed but did not converge (only `None` was saved)
            return None, None
        if not c and not self.compute:
            raise FileNotFoundError("Curve missing. Previous results expected.")
        self._prepare_metric_for_computation(g, c)
        if not c:
            cfg.update(c_ref=self._current_ref_curve(cfg))
            suffix = cfg.suffix
            c, fname = self._call_find_mots(
                cfg, pass_nr=1, timings=self.timings,
                callback=lambda curve: self._optimize_parameterization(curve, cfg)
            )
            if not c:
                return None, None
            if self.two_pass:
                cfg.update(suffix=suffix) # reset suffix if changed by cfg_callback
                self._c_ref = c
                c, fname = self._call_find_mots(
                    cfg, pass_nr=2, timings=self.timings,
                    c_ref=self._current_ref_curve(cfg, allow_transform=False),
                )
                if not c:
                    return None, None
            self._compute_properties(c, fname)
        shape_location = self._get_shape_location(c)
        if shape_location is not None:
            self._prev_locations[g.time] = shape_location
        # NOTE: Assigning to `self._c_ref` has the effect that *any*
        # subsequent invocation of `_call_find_mots()` deletes the
        # search config of the assigned curve (the data in
        # `c_ref.user_data['cfg']`). Hence, we need to take care not to
        # trigger such a call before the result has been saved. Note
        # that this is not a problem here, since the above `find_mots()`
        # calls have stored the result already and/or the
        # `_compute_properties()` call did too. From now on, the curve
        # is only used as reference curve and *can* have its search
        # config be removed.
        self._c_ref = c
        return c, fname
def _get_shape_location(self, c):
r"""Return the z-coordinate of the location of the curve's shape."""
if self.use_location == "center":
return (c(0)[1] + c(np.pi)[1]) / 2.0
if self.use_location == "north":
return c(0)[1]
if self.use_location == "south":
return c(np.pi)[1]
if self.use_location == "xmin":
param = c.find_local_min_x()
if param is None:
self._p("Could not determine location of local x-minimum.")
return None
z = c(param)[1]
self._p("z-coordinate of x-minimum: %s" % z)
return z
raise ValueError("Unknown location: %s" % (self.use_location,))
    def _current_ref_curve(self, cfg, allow_transform=True):
        r"""Prepare and return the current reference curve.

        This also updates the given configuration with the bipolar coordinate
        setup to use (in case these coordinates are activated) and performs
        the *neck trick*.

        @param cfg
            Current search configuration; its ``bipolar_kw`` and ``reparam``
            settings may be updated in place.
        @param allow_transform
            Whether to apply the velocity transform and neck trick to the
            reference curve before returning it.
        """
        g = cfg.metric
        c_ref = self._c_ref
        if cfg.bipolar_ref_curve:
            bipolar_kw = cfg.bipolar_kw or dict()
            if self.auto_bipolar_move:
                # Center the bipolar coordinates on the (predicted) MOTS.
                bipolar_kw['move'] = self._get_bipolar_origin(cfg)
            if self._do_bipolar_scaling_optimization(cfg):
                bipolar_kw['scale'] = self._get_bipolar_autoscale(c_ref, cfg)
            cfg.bipolar_kw = bipolar_kw
            if self._do_curv2_optimization(cfg):
                # Reuse a previously optimized smoothing value if available.
                cfg.reparam = self._get_curv2_reparam_settings(c_ref, cfg)
        if allow_transform:
            c_ref = self.apply_velocity(g, c_ref)
            c_ref = self.neck_trick(g, c_ref)
        return c_ref
def _reparam_settings(self, cfg):
if isinstance(cfg.reparam, (list, tuple)):
strategy, opts = cfg.reparam
else:
strategy = cfg.reparam
opts = dict()
return strategy, opts
    def _do_bipolar_scaling_optimization(self, cfg):
        r"""Return whether the bipolar coordinate scale should be optimized.

        We optimize when no explicit scale is configured, or when we are past
        the first slice and configured to keep following (re-optimizing) the
        scale from slice to slice.
        """
        bipolar_kw = cfg.bipolar_kw or dict()
        return (bipolar_kw.get('scale', None) is None
                or (not self._is_first_slice(cfg.metric)
                    and self.follow_bipolar_scaling))
    def _do_curv2_optimization(self, cfg):
        r"""Return whether to optimize the 'curv2' reparameterization smoothing.

        We optimize the curv2 smoothing iff we also optimize the bipolar
        scaling and smoothing is not fixed. This should be made independent!
        """
        # need fresh cfg without auto-settings
        cfg = self._get_cfg(cfg.metric, 1) # we should solve this differently, though
        reparam, opts = self._reparam_settings(cfg)
        return (reparam == 'curv2'
                and (opts.get('smoothing', None) is None
                     or (not self._is_first_slice(cfg.metric)
                         and self.follow_curv2_smoothing))
                and cfg.bipolar_ref_curve)
def _get_curv2_reparam_settings(self, curve, cfg):
reparam_strategy, reparam_opts = self._reparam_settings(cfg)
key = 'optimized_curv2_smoothing'
if key in curve.user_data:
reparam_opts['smoothing'] = curve.user_data[key]
return reparam_strategy, reparam_opts
    def _optimize_parameterization(self, curve, cfg):
        r"""Optimize and update the stored parameterization for the given MOTS.

        The "optimal" settings are stored in the `user_data` of the current
        curve and may then be used for the search for the next MOTS. The curve
        data itself (shape and reference curve) is not modified in any way.

        No-op when no curve was found or bipolar coordinates are inactive.
        """
        if curve is None or not cfg.bipolar_ref_curve:
            return
        if self._reparam_settings(cfg)[0] == 'curv2':
            # Jointly optimize bipolar scale and curv2 smoothing, seeding
            # the smoothing from the current settings (default 0.05).
            scale, smoothing = self._determine_optimal_parameters(
                curve=curve, cfg=cfg,
                initial_smoothing=self._get_curv2_reparam_settings(
                    curve, cfg
                )[1].get('smoothing', 0.05)
            )
            curve.user_data['optimized_curv2_smoothing'] = smoothing
        else:
            scale = self._determine_optimal_parameters(curve=curve, cfg=cfg)
        curve.user_data['optimized_bipolar_scale'] = scale
def _get_bipolar_autoscale(self, curve, cfg):
r"""Return previously determined bipolar scale or a rough estimate."""
key = 'optimized_bipolar_scale'
if key in curve.user_data:
return curve.user_data[key]
scale, _ = self._get_parameter_guesses(curve, cfg)
return scale
    def _get_bipolar_origin(self, cfg):
        r"""Return the origin for bipolar coordinates.

        If the origin is specified using configuration options (`bipolar_kw`),
        then this value is returned. Otherwise, we use the predicted location
        of the MOTS (i.e. the center between the top and bottom MOTSs in case
        the neck trick is chosen).
        """
        g = cfg.metric
        c_ref = self._c_ref
        if self.predict_location:
            # Prediction based on locations recorded for previous slices.
            z, _ = self.predicted_location(
                time=g.time, prev_curve=c_ref, full_output=True,
                verbose=False,
            )
            if z is not None:
                return z
        else:
            # Fall back to the neck information (center between the two
            # individual MOTSs), if available.
            neck_info = self._get_neck_info(g)
            if neck_info.has_data:
                return neck_info.z_center
        if callable(c_ref):
            # Midpoint between the curve's top and bottom points.
            return (c_ref(0)[1]+c_ref(np.pi)[1]) / 2.0
        if isinstance(c_ref, (list, tuple)):
            return c_ref[-1] # z-offset in case of circle
        return 0.0
def _get_parameter_guesses(self, curve, cfg):
if not cfg.bipolar_ref_curve:
return None, None
g = cfg.metric
| |
self.security_groups,
instance_type=self.instance_type,
tags={"instance": propagated_tags},
placement=random.choice(self.availability_zones),
launch_config=self.launch_config,
)
for instance in reservation.instances:
instance.autoscaling_group = self
self.instance_states.append(
InstanceState(
instance,
protected_from_scale_in=self.new_instances_protected_from_scale_in,
)
)
def append_target_groups(self, target_group_arns):
append = [x for x in target_group_arns if x not in self.target_group_arns]
self.target_group_arns.extend(append)
class AutoScalingBackend(BaseBackend):
    def __init__(self, region_name):
        """Create an empty per-region autoscaling backend.

        Cross-service operations (instances, classic ELBs, ELBv2 target
        groups) go through the sibling backends registered for the same
        region.
        """
        self.autoscaling_groups = OrderedDict()
        self.launch_configurations = OrderedDict()
        self.policies = {}
        self.lifecycle_hooks = {}
        self.ec2_backend = ec2_backends[region_name]
        self.elb_backend = elb_backends[region_name]
        self.elbv2_backend = elbv2_backends[region_name]
        self.region = region_name
    def reset(self):
        """Wipe all state and re-initialize the backend for the same region."""
        region = self.region
        # Replacing __dict__ wholesale drops every attribute at once before
        # __init__ rebuilds the empty collections and backend references.
        self.__dict__ = {}
        self.__init__(region)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "autoscaling"
) + BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "autoscaling-plans"
)
    def create_launch_configuration(
        self,
        name,
        image_id,
        key_name,
        kernel_id,
        ramdisk_id,
        security_groups,
        user_data,
        instance_type,
        instance_monitoring,
        instance_profile_name,
        spot_price,
        ebs_optimized,
        associate_public_ip_address,
        block_device_mappings,
        instance_id=None,
    ):
        """Create a launch configuration and store it under *name*.

        Either *instance_id* or both *image_id* and *instance_type* must be
        given; otherwise a ValidationError is raised.  Returns the created
        FakeLaunchConfiguration.
        """
        valid_requests = [
            instance_id is not None,
            image_id is not None and instance_type is not None,
        ]
        if not any(valid_requests):
            raise ValidationError(
                "Valid requests must contain either the InstanceID parameter or both the ImageId and InstanceType parameters."
            )
        if instance_id is not None:
            # TODO: https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html
            pass
        launch_configuration = FakeLaunchConfiguration(
            name=name,
            image_id=image_id,
            key_name=key_name,
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            security_groups=security_groups,
            user_data=user_data,
            instance_type=instance_type,
            instance_monitoring=instance_monitoring,
            instance_profile_name=instance_profile_name,
            spot_price=spot_price,
            ebs_optimized=ebs_optimized,
            associate_public_ip_address=associate_public_ip_address,
            block_device_mapping_dict=block_device_mappings,
        )
        self.launch_configurations[name] = launch_configuration
        return launch_configuration
def describe_launch_configurations(self, names):
configurations = self.launch_configurations.values()
if names:
return [
configuration
for configuration in configurations
if configuration.name in names
]
else:
return list(configurations)
def delete_launch_configuration(self, launch_configuration_name):
self.launch_configurations.pop(launch_configuration_name, None)
    def create_auto_scaling_group(
        self,
        name,
        availability_zones,
        desired_capacity,
        max_size,
        min_size,
        launch_config_name,
        launch_template,
        vpc_zone_identifier,
        default_cooldown,
        health_check_period,
        health_check_type,
        load_balancers,
        target_group_arns,
        placement_group,
        termination_policies,
        tags,
        new_instances_protected_from_scale_in=False,
        instance_id=None,
    ):
        """Create a FakeAutoScalingGroup and register it with this backend.

        Exactly one of *launch_config_name*, *launch_template*, or
        *instance_id* must be supplied (ValidationError otherwise); with
        *instance_id*, a launch configuration is derived from that
        instance.  Raises InvalidInstanceError for unknown instance ids.
        Returns the created group after syncing attached ELBs and target
        groups.
        """
        def make_int(value):
            # Size parameters arrive as strings from the request; keep None.
            return int(value) if value is not None else value
        max_size = make_int(max_size)
        min_size = make_int(min_size)
        desired_capacity = make_int(desired_capacity)
        default_cooldown = make_int(default_cooldown)
        if health_check_period is None:
            # AWS default health check grace period, in seconds.
            health_check_period = 300
        else:
            health_check_period = make_int(health_check_period)
        # TODO: Add MixedInstancesPolicy once implemented.
        # Verify only a single launch config-like parameter is provided.
        params = [launch_config_name, launch_template, instance_id]
        num_params = sum([1 for param in params if param])
        if num_params != 1:
            raise ValidationError(
                "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, "
                "InstanceId or MixedInstancesPolicy parameter."
            )
        if instance_id:
            try:
                instance = self.ec2_backend.get_instance(instance_id)
                launch_config_name = name
                FakeLaunchConfiguration.create_from_instance(
                    launch_config_name, instance, self
                )
            except InvalidInstanceIdError:
                raise InvalidInstanceError(instance_id)
        group = FakeAutoScalingGroup(
            name=name,
            availability_zones=availability_zones,
            desired_capacity=desired_capacity,
            max_size=max_size,
            min_size=min_size,
            launch_config_name=launch_config_name,
            launch_template=launch_template,
            vpc_zone_identifier=vpc_zone_identifier,
            default_cooldown=default_cooldown,
            health_check_period=health_check_period,
            health_check_type=health_check_type,
            load_balancers=load_balancers,
            target_group_arns=target_group_arns,
            placement_group=placement_group,
            termination_policies=termination_policies,
            autoscaling_backend=self,
            ec2_backend=self.ec2_backend,
            tags=tags,
            new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
        )
        self.autoscaling_groups[name] = group
        # Newly launched instances must be registered with any attached
        # load balancers / target groups right away.
        self.update_attached_elbs(group.name)
        self.update_attached_target_groups(group.name)
        return group
    def update_auto_scaling_group(
        self,
        name,
        availability_zones,
        desired_capacity,
        max_size,
        min_size,
        launch_config_name,
        launch_template,
        vpc_zone_identifier,
        health_check_period,
        health_check_type,
        new_instances_protected_from_scale_in=None,
    ):
        """
        Update an existing ASG in place and return it.

        At most one of LaunchConfigurationName / LaunchTemplate may be
        given (ValidationError otherwise).

        The parameter DefaultCooldown, PlacementGroup, TerminationPolicies are not yet implemented
        """
        # TODO: Add MixedInstancesPolicy once implemented.
        # Verify only a single launch config-like parameter is provided.
        if launch_config_name and launch_template:
            raise ValidationError(
                "Valid requests must contain either LaunchTemplate, LaunchConfigurationName "
                "or MixedInstancesPolicy parameter."
            )
        group = self.autoscaling_groups[name]
        group.update(
            availability_zones=availability_zones,
            desired_capacity=desired_capacity,
            max_size=max_size,
            min_size=min_size,
            launch_config_name=launch_config_name,
            launch_template=launch_template,
            vpc_zone_identifier=vpc_zone_identifier,
            health_check_period=health_check_period,
            health_check_type=health_check_type,
            new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
        )
        return group
def describe_auto_scaling_groups(self, names):
groups = self.autoscaling_groups.values()
if names:
return [group for group in groups if group.name in names]
else:
return list(groups)
    def delete_auto_scaling_group(self, group_name):
        """Delete an ASG after scaling it down to zero instances."""
        # Scaling to zero first terminates the group's instances and syncs
        # attached ELBs before the group record disappears.
        self.set_desired_capacity(group_name, 0)
        self.autoscaling_groups.pop(group_name, None)
def describe_auto_scaling_instances(self, instance_ids):
instance_states = []
for group in self.autoscaling_groups.values():
instance_states.extend(
[
x
for x in group.instance_states
if not instance_ids or x.instance.id in instance_ids
]
)
return instance_states
    def attach_instances(self, group_name, instance_ids):
        """Attach existing EC2 instances to an auto scaling group.

        Raises ResourceContentionError when the attachment would exceed the
        group's max size.  Otherwise bumps the desired capacity, tags the
        instances with the ASG name, and syncs attached ELBs and target
        groups.
        """
        group = self.autoscaling_groups[group_name]
        original_size = len(group.instance_states)
        if (original_size + len(instance_ids)) > group.max_size:
            raise ResourceContentionError
        else:
            group.desired_capacity = original_size + len(instance_ids)
            new_instances = [
                InstanceState(
                    self.ec2_backend.get_instance(x),
                    protected_from_scale_in=group.new_instances_protected_from_scale_in,
                    autoscaling_group=group,
                )
                for x in instance_ids
            ]
            for instance in new_instances:
                # Tag each instance so it is discoverable as a group member.
                self.ec2_backend.create_tags(
                    [instance.instance.id], {ASG_NAME_TAG: group.name}
                )
            group.instance_states.extend(new_instances)
            self.update_attached_elbs(group.name)
            self.update_attached_target_groups(group.name)
def set_instance_health(self, instance_id, health_status):
"""
The ShouldRespectGracePeriod-parameter is not yet implemented
"""
instance = self.ec2_backend.get_instance(instance_id)
instance_state = next(
instance_state
for group in self.autoscaling_groups.values()
for instance_state in group.instance_states
if instance_state.instance.id == instance.id
)
instance_state.health_status = health_status
    def detach_instances(self, group_name, instance_ids, should_decrement):
        """Detach instances from an ASG and return their InstanceStates.

        The ASG name tag is removed from each detached instance.  When
        *should_decrement* is true, the group's desired capacity shrinks by
        the number of detached instances; otherwise the group replaces them
        via set_desired_capacity with the unchanged target.
        """
        group = self.autoscaling_groups[group_name]
        original_size = group.desired_capacity
        detached_instances = [
            x for x in group.instance_states if x.instance.id in instance_ids
        ]
        for instance in detached_instances:
            # Detached instances no longer belong to the group.
            self.ec2_backend.delete_tags(
                [instance.instance.id], {ASG_NAME_TAG: group.name}
            )
        new_instance_state = [
            x for x in group.instance_states if x.instance.id not in instance_ids
        ]
        group.instance_states = new_instance_state
        if should_decrement:
            group.desired_capacity = original_size - len(instance_ids)
        group.set_desired_capacity(group.desired_capacity)
        return detached_instances
    def set_desired_capacity(self, group_name, desired_capacity):
        """Set a group's desired capacity and re-sync its attached ELBs."""
        group = self.autoscaling_groups[group_name]
        group.set_desired_capacity(desired_capacity)
        self.update_attached_elbs(group_name)
    def change_capacity(self, group_name, scaling_adjustment):
        """Adjust a group's desired capacity by an absolute (signed) amount."""
        group = self.autoscaling_groups[group_name]
        desired_capacity = group.desired_capacity + scaling_adjustment
        self.set_desired_capacity(group_name, desired_capacity)
def change_capacity_percent(self, group_name, scaling_adjustment):
"""http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
If PercentChangeInCapacity returns a value between 0 and 1,
Auto Scaling will round it off to 1. If the PercentChangeInCapacity
returns a value greater than 1, Auto Scaling will round it off to the
lower value. For example, if PercentChangeInCapacity returns 12.5,
then Auto Scaling will round it off to 12."""
group = self.autoscaling_groups[group_name]
percent_change = 1 + (scaling_adjustment / 100.0)
desired_capacity = group.desired_capacity * percent_change
if group.desired_capacity < desired_capacity < group.desired_capacity + 1:
desired_capacity = group.desired_capacity + 1
else:
desired_capacity = int(desired_capacity)
self.set_desired_capacity(group_name, desired_capacity)
    def create_lifecycle_hook(self, name, as_name, transition, timeout, result):
        """Create (or replace) a lifecycle hook, keyed by '<asg>_<hook>'."""
        lifecycle_hook = FakeLifeCycleHook(name, as_name, transition, timeout, result)
        self.lifecycle_hooks["%s_%s" % (as_name, name)] = lifecycle_hook
        return lifecycle_hook
def describe_lifecycle_hooks(self, as_name, lifecycle_hook_names=None):
return [
lifecycle_hook
for lifecycle_hook in self.lifecycle_hooks.values()
if (lifecycle_hook.as_name == as_name)
and (
not lifecycle_hook_names or lifecycle_hook.name in lifecycle_hook_names
)
]
def delete_lifecycle_hook(self, as_name, name):
self.lifecycle_hooks.pop("%s_%s" % (as_name, name), None)
    def put_scaling_policy(
        self,
        name,
        policy_type,
        metric_aggregation_type,
        adjustment_type,
        as_name,
        min_adjustment_magnitude,
        scaling_adjustment,
        cooldown,
        target_tracking_config,
        step_adjustments,
        estimated_instance_warmup,
        predictive_scaling_configuration,
    ):
        """Create or replace a scaling policy stored under *name*.

        Returns the created FakeScalingPolicy.  Note that policies are
        keyed by policy name only, not by (group, name).
        """
        policy = FakeScalingPolicy(
            name,
            policy_type,
            metric_aggregation_type,
            adjustment_type=adjustment_type,
            as_name=as_name,
            min_adjustment_magnitude=min_adjustment_magnitude,
            scaling_adjustment=scaling_adjustment,
            cooldown=cooldown,
            target_tracking_config=target_tracking_config,
            step_adjustments=step_adjustments,
            estimated_instance_warmup=estimated_instance_warmup,
            predictive_scaling_configuration=predictive_scaling_configuration,
            autoscaling_backend=self,
        )
        self.policies[name] = policy
        return policy
def describe_policies(
self, autoscaling_group_name=None, policy_names=None, policy_types=None
):
return [
policy
for policy in self.policies.values()
if (not autoscaling_group_name or policy.as_name == autoscaling_group_name)
and (not policy_names or policy.name in policy_names)
and (not policy_types or policy.policy_type in policy_types)
]
def delete_policy(self, group_name):
self.policies.pop(group_name, None)
def execute_policy(self, group_name):
policy = self.policies[group_name]
policy.execute()
def update_attached_elbs(self, group_name):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(
state.instance.id for state in group.active_instances()
)
# skip this if group.load_balancers is empty
# otherwise elb_backend.describe_load_balancers returns all available load balancers
if not group.load_balancers:
return
try:
elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
except LoadBalancerNotFoundError:
# ELBs can be deleted before their autoscaling group
return
for elb in elbs:
elb_instace_ids = set(elb.instance_ids)
self.elb_backend.register_instances(
elb.name, group_instance_ids - elb_instace_ids, from_autoscaling=True
)
self.elb_backend.deregister_instances(
elb.name, elb_instace_ids - group_instance_ids, from_autoscaling=True
)
def update_attached_target_groups(self, group_name):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(state.instance.id for state in group.instance_states)
# no action necessary if target_group_arns is empty
if not group.target_group_arns:
return
target_groups = self.elbv2_backend.describe_target_groups(
target_group_arns=group.target_group_arns,
load_balancer_arn=None,
names=None,
)
for target_group in target_groups:
asg_targets = [
{"id": x, "port": target_group.port} for x in group_instance_ids
]
self.elbv2_backend.register_targets(target_group.arn, (asg_targets))
def create_or_update_tags(self, tags):
for tag in tags:
group_name = tag["resource_id"]
group = self.autoscaling_groups[group_name]
old_tags = group.tags
new_tags = []
# if key was in old_tags, update old tag
for old_tag in old_tags:
if old_tag["key"] == tag["key"]:
new_tags.append(tag)
else:
new_tags.append(old_tag)
# if key was never in old_tag's add it (create tag)
if not any(new_tag["key"] == tag["key"] for new_tag in new_tags):
new_tags.append(tag)
group.tags = new_tags
def delete_tags(self, tags):
for tag_to_delete in tags:
group_name = tag_to_delete["resource_id"]
key_to_delete = tag_to_delete["key"]
group = self.autoscaling_groups[group_name]
old_tags = group.tags
group.tags = [x for x in old_tags if x["key"] != key_to_delete]
def attach_load_balancers(self, group_name, load_balancer_names):
group = self.autoscaling_groups[group_name]
group.load_balancers.extend(
[x for x in load_balancer_names if x not in group.load_balancers]
)
self.update_attached_elbs(group_name)
def describe_load_balancers(self, group_name):
return self.autoscaling_groups[group_name].load_balancers
    def detach_load_balancers(self, group_name, load_balancer_names):
        """Detach the named ELBs from a group and deregister its instances.

        NOTE(review): instances are deregistered from *all* of the group's
        currently attached load balancers, not only the ones being
        detached -- verify against real AWS behavior.
        """
        group = self.autoscaling_groups[group_name]
        group_instance_ids = set(state.instance.id for state in group.instance_states)
        elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
        for elb in elbs:
            self.elb_backend.deregister_instances(
                elb.name, group_instance_ids, from_autoscaling=True
            )
        group.load_balancers = [
            x for x in group.load_balancers if x not in load_balancer_names
        ]
    def attach_load_balancer_target_groups(self, group_name, target_group_arns):
        """Attach target groups to a group and register its instances with them."""
        group = self.autoscaling_groups[group_name]
        group.append_target_groups(target_group_arns)
        self.update_attached_target_groups(group_name)
def describe_load_balancer_target_groups(self, group_name):
return self.autoscaling_groups[group_name].target_group_arns
    def detach_load_balancer_target_groups(self, group_name, target_group_arns):
        """Detach target groups from a group and deregister its instances."""
        group = self.autoscaling_groups[group_name]
        group.target_group_arns = [
            x for x in group.target_group_arns if x not in target_group_arns
        ]
        for target_group in target_group_arns:
            # NOTE(review): targets here carry no "port", unlike those built
            # in update_attached_target_groups() -- confirm the elbv2
            # backend accepts port-less target dicts for deregistration.
            asg_targets = [{"id": x.instance.id} for x in group.instance_states]
            self.elbv2_backend.deregister_targets(target_group, (asg_targets))
def suspend_processes(self, group_name, scaling_processes):
all_proc_names = [
"Launch",
"Terminate",
"AddToLoadBalancer",
"AlarmNotification",
"AZRebalance",
"HealthCheck",
"InstanceRefresh",
"ReplaceUnhealthy",
"ScheduledActions",
]
group = self.autoscaling_groups[group_name]
set_to_add = set(scaling_processes or all_proc_names)
group.suspended_processes = list(
set(group.suspended_processes).union(set_to_add)
)
def resume_processes(self, group_name, scaling_processes):
group = self.autoscaling_groups[group_name]
if scaling_processes:
group.suspended_processes = list(
set(group.suspended_processes).difference(set(scaling_processes))
)
else:
group.suspended_processes = []
def set_instance_protection(
self, group_name, instance_ids, protected_from_scale_in
):
group = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EdgeKubernetesArgs', 'EdgeKubernetes']
@pulumi.input_type
class EdgeKubernetesArgs:
def __init__(__self__, *,
             worker_instance_types: pulumi.Input[Sequence[pulumi.Input[str]]],
             worker_number: pulumi.Input[int],
             worker_vswitch_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
             addons: Optional[pulumi.Input[Sequence[pulumi.Input['EdgeKubernetesAddonArgs']]]] = None,
             availability_zone: Optional[pulumi.Input[str]] = None,
             client_cert: Optional[pulumi.Input[str]] = None,
             client_key: Optional[pulumi.Input[str]] = None,
             cluster_ca_cert: Optional[pulumi.Input[str]] = None,
             deletion_protection: Optional[pulumi.Input[bool]] = None,
             force_update: Optional[pulumi.Input[bool]] = None,
             install_cloud_monitor: Optional[pulumi.Input[bool]] = None,
             is_enterprise_security_group: Optional[pulumi.Input[bool]] = None,
             key_name: Optional[pulumi.Input[str]] = None,
             kube_config: Optional[pulumi.Input[str]] = None,
             log_config: Optional[pulumi.Input['EdgeKubernetesLogConfigArgs']] = None,
             name: Optional[pulumi.Input[str]] = None,
             name_prefix: Optional[pulumi.Input[str]] = None,
             new_nat_gateway: Optional[pulumi.Input[bool]] = None,
             node_cidr_mask: Optional[pulumi.Input[int]] = None,
             password: Optional[pulumi.Input[str]] = None,
             pod_cidr: Optional[pulumi.Input[str]] = None,
             proxy_mode: Optional[pulumi.Input[str]] = None,
             rds_instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             resource_group_id: Optional[pulumi.Input[str]] = None,
             security_group_id: Optional[pulumi.Input[str]] = None,
             service_cidr: Optional[pulumi.Input[str]] = None,
             slb_internet_enabled: Optional[pulumi.Input[bool]] = None,
             tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
             user_data: Optional[pulumi.Input[str]] = None,
             version: Optional[pulumi.Input[str]] = None,
             worker_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['EdgeKubernetesWorkerDataDiskArgs']]]] = None,
             worker_disk_category: Optional[pulumi.Input[str]] = None,
             worker_disk_performance_level: Optional[pulumi.Input[str]] = None,
             worker_disk_size: Optional[pulumi.Input[int]] = None,
             worker_disk_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
             worker_instance_charge_type: Optional[pulumi.Input[str]] = None):
    """
    The set of arguments for constructing a EdgeKubernetes resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] worker_instance_types: The instance types of worker node, you can set multiple types to avoid NoStock of a certain type
    :param pulumi.Input[int] worker_number: The cloud worker node number of the edge kubernetes cluster. Default to 1. It is limited up to 50 and if you want to enlarge it, please apply white list or contact with us.
    :param pulumi.Input[str] availability_zone: The ID of availability zone.
    :param pulumi.Input[str] client_cert: The path of client certificate, like `~/.kube/client-cert.pem`.
    :param pulumi.Input[str] client_key: The path of client key, like `~/.kube/client-key.pem`.
    :param pulumi.Input[str] cluster_ca_cert: The path of cluster ca certificate, like `~/.kube/cluster-ca-cert.pem`
    :param pulumi.Input[bool] deletion_protection: Whether to enable cluster deletion protection.
    :param pulumi.Input[bool] force_update: Default false, when you want to change `vpc_id`, you have to set this field to true, then the cluster will be recreated.
    :param pulumi.Input[bool] install_cloud_monitor: Install cloud monitor agent on ECS. default: `true`.
    :param pulumi.Input[bool] is_enterprise_security_group: Enable to create advanced security group. default: false. See [Advanced security group](https://www.alibabacloud.com/help/doc-detail/120621.htm).
    :param pulumi.Input[str] key_name: The keypair of ssh login cluster node, you have to create it first. You have to specify one of `password` `key_name` `kms_encrypted_password` fields.
    :param pulumi.Input[str] kube_config: The path of kube config, like `~/.kube/config`.
    :param pulumi.Input[str] name: The kubernetes cluster's name. It is unique in one Alicloud account.
    :param pulumi.Input[bool] new_nat_gateway: Whether to create a new nat gateway while creating kubernetes cluster. Default to true. Then openapi in Alibaba Cloud are not all on intranet, So turn this option on is a good choice.
    :param pulumi.Input[int] node_cidr_mask: The node cidr block to specific how many pods can run on single node. 24-28 is allowed. 24 means 2^(32-24)-1=255 and the node can run at most 255 pods. default: 24
    :param pulumi.Input[str] password: The password of ssh login cluster node. You have to specify one of `password`, `key_name` `kms_encrypted_password` fields.
    :param pulumi.Input[str] pod_cidr: [Flannel Specific] The CIDR block for the pod network when using Flannel.
    :param pulumi.Input[str] proxy_mode: Proxy mode is option of kube-proxy. options: iptables|ipvs. default: ipvs.
    :param pulumi.Input[str] resource_group_id: The ID of the resource group,by default these cloud resources are automatically assigned to the default resource group.
    :param pulumi.Input[str] security_group_id: The ID of the security group to which the ECS instances in the cluster belong. If it is not specified, a new Security group will be built.
    :param pulumi.Input[str] service_cidr: The CIDR block for the service network. It cannot be duplicated with the VPC CIDR and CIDR used by Kubernetes cluster in VPC, cannot be modified after creation.
    :param pulumi.Input[bool] slb_internet_enabled: Whether to create internet load balancer for API Server. Default to true.
    :param pulumi.Input[Mapping[str, Any]] tags: Default nil, A map of tags assigned to the kubernetes cluster and work node.
    :param pulumi.Input[str] user_data: Windows instances support batch and PowerShell scripts. If your script file is larger than 1 KB, we recommend that you upload the script to Object Storage Service (OSS) and pull it through the internal endpoint of your OSS bucket.
    :param pulumi.Input[str] version: Desired Kubernetes version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except you set a higher version number. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by ACK.
    :param pulumi.Input[Sequence[pulumi.Input['EdgeKubernetesWorkerDataDiskArgs']]] worker_data_disks: The data disk configurations of worker nodes, such as the disk type and disk size.
    :param pulumi.Input[str] worker_disk_category: The system disk category of worker node. Its valid value are `cloud_efficiency`, `cloud_ssd` and `cloud_essd` and . Default to `cloud_efficiency`.
    :param pulumi.Input[str] worker_disk_performance_level: Worker node system disk performance level, when `worker_disk_category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
    :param pulumi.Input[int] worker_disk_size: The system disk size of worker node. Its valid value range [20~32768] in GB. Default to 40.
    :param pulumi.Input[str] worker_disk_snapshot_policy_id: Worker node system disk auto snapshot policy.
    """
    # Required inputs are always recorded.
    pulumi.set(__self__, "worker_instance_types", worker_instance_types)
    pulumi.set(__self__, "worker_number", worker_number)
    pulumi.set(__self__, "worker_vswitch_ids", worker_vswitch_ids)
    # Optional inputs are recorded only when explicitly provided.  The dict
    # preserves insertion order, so pulumi.set is invoked in the same order
    # as the original generated if-chain.
    optional_inputs = {
        "addons": addons,
        "availability_zone": availability_zone,
        "client_cert": client_cert,
        "client_key": client_key,
        "cluster_ca_cert": cluster_ca_cert,
        "deletion_protection": deletion_protection,
        "force_update": force_update,
        "install_cloud_monitor": install_cloud_monitor,
        "is_enterprise_security_group": is_enterprise_security_group,
        "key_name": key_name,
        "kube_config": kube_config,
        "log_config": log_config,
        "name": name,
        "name_prefix": name_prefix,
        "new_nat_gateway": new_nat_gateway,
        "node_cidr_mask": node_cidr_mask,
        "password": password,
        "pod_cidr": pod_cidr,
        "proxy_mode": proxy_mode,
        "rds_instances": rds_instances,
        "resource_group_id": resource_group_id,
        "security_group_id": security_group_id,
        "service_cidr": service_cidr,
        "slb_internet_enabled": slb_internet_enabled,
        "tags": tags,
        "user_data": user_data,
        "version": version,
        "worker_data_disks": worker_data_disks,
        "worker_disk_category": worker_disk_category,
        "worker_disk_performance_level": worker_disk_performance_level,
        "worker_disk_size": worker_disk_size,
        "worker_disk_snapshot_policy_id": worker_disk_snapshot_policy_id,
        "worker_instance_charge_type": worker_instance_charge_type,
    }
    for input_name, input_value in optional_inputs.items():
        if input_value is not None:
            pulumi.set(__self__, input_name, input_value)
@property
@pulumi.getter(name="workerInstanceTypes")
def worker_instance_types(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
    """
    Instance types for worker nodes; multiple types may be given to avoid
    NoStock errors for a single type.
    """
    return pulumi.get(self, "worker_instance_types")

@worker_instance_types.setter
def worker_instance_types(self, instance_types: pulumi.Input[Sequence[pulumi.Input[str]]]):
    pulumi.set(self, "worker_instance_types", instance_types)
@property
@pulumi.getter(name="workerNumber")
def worker_number(self) -> pulumi.Input[int]:
    """
    The cloud worker node number of the edge kubernetes cluster. Default to 1.
    It is limited up to 50; to enlarge it, apply for the white list or contact us.
    """
    return pulumi.get(self, "worker_number")

@worker_number.setter
def worker_number(self, count: pulumi.Input[int]):
    pulumi.set(self, "worker_number", count)
@property
@pulumi.getter(name="workerVswitchIds")
def worker_vswitch_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
    """
    The vswitch IDs (forwarded to the provider as ``workerVswitchIds``).
    """
    return pulumi.get(self, "worker_vswitch_ids")

@worker_vswitch_ids.setter
def worker_vswitch_ids(self, vswitch_ids: pulumi.Input[Sequence[pulumi.Input[str]]]):
    pulumi.set(self, "worker_vswitch_ids", vswitch_ids)
@property
@pulumi.getter
def addons(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EdgeKubernetesAddonArgs']]]]:
    """
    Addon configurations for the cluster (``EdgeKubernetesAddonArgs`` entries).
    """
    return pulumi.get(self, "addons")

@addons.setter
def addons(self, addon_list: Optional[pulumi.Input[Sequence[pulumi.Input['EdgeKubernetesAddonArgs']]]]):
    pulumi.set(self, "addons", addon_list)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
    """
    The ID of availability zone.
    """
    return pulumi.get(self, "availability_zone")

@availability_zone.setter
def availability_zone(self, zone_id: Optional[pulumi.Input[str]]):
    pulumi.set(self, "availability_zone", zone_id)
@property
@pulumi.getter(name="clientCert")
| |
self.session.add(repo)
self.session.commit()
headers = {"Authorization": "token <KEY>"}
output = self.app.post(
"/api/0/test/pull-request/1/merge", headers=headers
)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertDictEqual(
data,
{
"error": "Pull-Request have been deactivated for this project",
"error_code": "EPULLREQUESTSDISABLED",
},
)
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge_only_assigned(self, send_email):
    """Test the api_pull_request_merge method of the flask api when
    only assignee can merge the PR and the PR isn't assigned."""
    send_email.return_value = True
    # Fixtures: project rows, bare git repos, and API tokens with ACLs.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    forked_repo = pagure.lib.query.get_authorized_project(
        self.session, "test"
    )
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=forked_repo,
        branch_from="test",
        repo_to=repo,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    # Enable the "only the assignee can merge" project setting.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    settings = repo.settings
    settings["Only_assignee_can_merge_pull-request"] = True
    repo.settings = settings
    self.session.add(repo)
    self.session.commit()
    headers = {"Authorization": "token aa<PASSWORD>"}
    # The PR has no assignee, so merging must be rejected with 403.
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 403)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "This request must be assigned to be merged",
            "error_code": "ENOTASSIGNED",
        },
    )
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge_only_assigned_not_assignee(
    self, send_email
):
    """Test the api_pull_request_merge method of the flask api when
    only assignee can merge the PR and the PR isn't assigned to the
    user asking to merge."""
    send_email.return_value = True
    # Fixtures: project rows, bare git repos, and API tokens with ACLs.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    forked_repo = pagure.lib.query.get_authorized_project(
        self.session, "test"
    )
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=forked_repo,
        branch_from="test",
        repo_to=repo,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    # Assign the PR to "foo", while the token used below belongs to another user.
    req.assignee = pagure.lib.query.search_user(self.session, "foo")
    self.session.add(req)
    self.session.commit()
    # Enable the "only the assignee can merge" project setting.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    settings = repo.settings
    settings["Only_assignee_can_merge_pull-request"] = True
    repo.settings = settings
    self.session.add(repo)
    self.session.commit()
    headers = {"Authorization": "token <KEY>"}
    # Merging as a non-assignee must be rejected with 403.
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 403)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "Only the assignee can merge this request",
            "error_code": "ENOTASSIGNEE",
        },
    )
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge_minimal_score(self, send_email):
    """Test the api_pull_request_merge method of the flask api when
    a PR requires a certain minimal score to be merged."""
    send_email.return_value = True
    # Fixtures: project rows, bare git repos, and API tokens with ACLs.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    forked_repo = pagure.lib.query.get_authorized_project(
        self.session, "test"
    )
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=forked_repo,
        branch_from="test",
        repo_to=repo,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    # Require a minimum review score of 2 before merges are allowed.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    settings = repo.settings
    settings["Minimum_score_to_merge_pull-request"] = 2
    repo.settings = settings
    self.session.add(repo)
    self.session.commit()
    headers = {"Authorization": "token <KEY>"}
    # The PR has no reviews, so the score requirement blocks the merge.
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 403)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "This request does not have the minimum review "
            "score necessary to be merged",
            "error_code": "EPRSCORE",
        },
    )
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge(self, send_email):
    """ Test the api_pull_request_merge method of the flask api. """
    send_email.return_value = True
    # Fixtures: project rows, bare git repos, and API tokens with ACLs.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    forked_repo = pagure.lib.query.get_authorized_project(
        self.session, "test"
    )
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=forked_repo,
        branch_from="test",
        repo_to=repo,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    headers = {"Authorization": "token <KEY>"}
    # Invalid project
    output = self.app.post(
        "/api/0/foo/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 404)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data, {"error": "Project not found", "error_code": "ENOPROJECT"}
    )
    # Valid token, wrong project
    output = self.app.post(
        "/api/0/test2/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 401)
    data = json.loads(output.get_data(as_text=True))
    self.assertEqual(
        pagure.api.APIERROR.EINVALIDTOK.name, data["error_code"]
    )
    self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data["error"])
    # Invalid PR
    output = self.app.post(
        "/api/0/test/pull-request/2/merge", headers=headers
    )
    self.assertEqual(output.status_code, 404)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data, {"error": "Pull-Request not found", "error_code": "ENOREQ"}
    )
    # Create a token for foo for this project
    item = pagure.lib.model.Token(
        id="foobar_token",
        user_id=2,
        project_id=1,
        expiration=datetime.datetime.utcnow()
        + datetime.timedelta(days=30),
    )
    self.session.add(item)
    self.session.commit()
    # Allow the token to merge PR
    acls = pagure.lib.query.get_acls(self.session)
    for acl in acls:
        if acl.name == "pull_request_merge":
            break
    item = pagure.lib.model.TokenAcl(
        token_id="foobar_token", acl_id=acl.id
    )
    self.session.add(item)
    self.session.commit()
    headers = {"Authorization": "token foobar_token"}
    # User not admin
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 403)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "You are not allowed to merge/close pull-request "
            "for this project",
            "error_code": "ENOPRCLOSE",
        },
    )
    headers = {"Authorization": "token <KEY>"}
    # Merge PR
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 200)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(data, {"message": "Changes merged!"})
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge_conflicting(self, send_email):
    """ Test the api_pull_request_merge method of the flask api. """
    send_email.return_value = True
    tests.create_projects(self.session)
    tests.add_content_git_repo(
        os.path.join(self.path, "repos", "test.git")
    )
    # Fork
    project = pagure.lib.query.get_authorized_project(self.session, "test")
    task = pagure.lib.query.fork_project(
        session=self.session, user="pingou", repo=project
    )
    self.session.commit()
    self.assertEqual(
        task.get(),
        {
            "endpoint": "ui_ns.view_repo",
            "repo": "test",
            "namespace": None,
            "username": "pingou",
        },
    )
    # Add content to the fork
    tests.add_content_to_git(
        os.path.join(self.path, "repos", "forks", "pingou", "test.git"),
        filename="foobar",
        content="content from the fork",
    )
    # Add content to the main repo, so they conflict
    tests.add_content_to_git(
        os.path.join(self.path, "repos", "test.git"),
        filename="foobar",
        content="content from the main repo",
    )
    project = pagure.lib.query.get_authorized_project(self.session, "test")
    fork = pagure.lib.query.get_authorized_project(
        self.session, "test", user="pingou"
    )
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=fork,
        branch_from="master",
        repo_to=project,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    headers = {"Authorization": "token <PASSWORD>ddd"}
    # Merge PR
    # Both branches touch the same file "foobar", so the merge must be
    # rejected with a 409 conflict.
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 409)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "This pull-request conflicts and thus cannot be merged",
            "error_code": "EPRCONFLICTS",
        },
    )
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_merge_user_token(self, send_email):
    """ Test the api_pull_request_merge method of the flask api. """
    send_email.return_value = True
    # Fixtures: project rows, bare git repos, and a user-wide token
    # (project_id=None) with ACLs.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    tests.create_tokens(self.session, project_id=None)
    tests.create_tokens_acl(self.session)
    # Create the pull-request to close
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    forked_repo = pagure.lib.query.get_authorized_project(
        self.session, "test"
    )
    req = pagure.lib.query.new_pull_request(
        session=self.session,
        repo_from=forked_repo,
        branch_from="test",
        repo_to=repo,
        branch_to="master",
        title="test pull-request",
        user="pingou",
    )
    self.session.commit()
    self.assertEqual(req.id, 1)
    self.assertEqual(req.title, "test pull-request")
    headers = {"Authorization": "token aa<PASSWORD>cccddd"}
    # Invalid project
    output = self.app.post(
        "/api/0/foo/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 404)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data, {"error": "Project not found", "error_code": "ENOPROJECT"}
    )
    # Valid token, invalid PR
    output = self.app.post(
        "/api/0/test2/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 404)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data, {"error": "Pull-Request not found", "error_code": "ENOREQ"}
    )
    # Valid token, invalid PR - other project
    output = self.app.post(
        "/api/0/test/pull-request/2/merge", headers=headers
    )
    self.assertEqual(output.status_code, 404)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data, {"error": "Pull-Request not found", "error_code": "ENOREQ"}
    )
    # Create a token for foo for this project
    item = pagure.lib.model.Token(
        id="foobar_token",
        user_id=2,
        project_id=1,
        expiration=datetime.datetime.utcnow()
        + datetime.timedelta(days=30),
    )
    self.session.add(item)
    self.session.commit()
    # Allow the token to merge PR
    acls = pagure.lib.query.get_acls(self.session)
    acl = None
    for acl in acls:
        if acl.name == "pull_request_merge":
            break
    item = pagure.lib.model.TokenAcl(
        token_id="foobar_token", acl_id=acl.id
    )
    self.session.add(item)
    self.session.commit()
    headers = {"Authorization": "token foobar_token"}
    # User not admin
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 403)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(
        data,
        {
            "error": "You are not allowed to merge/close pull-request "
            "for this project",
            "error_code": "ENOPRCLOSE",
        },
    )
    headers = {"Authorization": "token <KEY>"}
    # Merge PR
    output = self.app.post(
        "/api/0/test/pull-request/1/merge", headers=headers
    )
    self.assertEqual(output.status_code, 200)
    data = json.loads(output.get_data(as_text=True))
    self.assertDictEqual(data, {"message": "Changes merged!"})
@patch("pagure.lib.notify.send_email")
def test_api_pull_request_add_comment(self, mockemail):
""" Test the api_pull_request_add_comment method of the flask api. """
mockemail.return_value = True
tests.create_projects(self.session)
tests.create_tokens(self.session)
tests.create_tokens_acl(self.session)
headers = {"Authorization": "token <KEY>"}
# Invalid project
output = self.app.post(
"/api/0/foo/pull-request/1/comment", headers=headers
)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertDictEqual(
data, {"error": "Project not found", "error_code": "ENOPROJECT"}
)
# Valid token, wrong project
output = self.app.post(
"/api/0/test2/pull-request/1/comment", headers=headers
)
self.assertEqual(output.status_code, 401)
data = json.loads(output.get_data(as_text=True))
self.assertEqual(
pagure.api.APIERROR.EINVALIDTOK.name, data["error_code"]
)
self.assertEqual(pagure.api.APIERROR.EINVALIDTOK.value, data["error"])
# No input
output = self.app.post(
"/api/0/test/pull-request/1/comment", headers=headers
)
self.assertEqual(output.status_code, 404)
data = json.loads(output.get_data(as_text=True))
self.assertDictEqual(
data, {"error": "Pull-Request not found", "error_code": "ENOREQ"}
)
# Create a pull-request
repo = pagure.lib.query.get_authorized_project(self.session, "test")
forked_repo = pagure.lib.query.get_authorized_project(
self.session, "test"
)
req = pagure.lib.query.new_pull_request(
session=self.session,
repo_from=forked_repo,
branch_from="master",
repo_to=repo,
branch_to="master",
title="test pull-request",
user="pingou",
)
self.session.commit()
self.assertEqual(req.id, 1)
self.assertEqual(req.title, "test pull-request")
# Check comments before
self.session.commit()
request = pagure.lib.query.search_pull_requests(
self.session, project_id=1, requestid=1
)
self.assertEqual(len(request.comments), 0)
data = {"title": "test issue"}
# Incomplete request
output = self.app.post(
"/api/0/test/pull-request/1/comment", data=data, headers=headers
)
self.assertEqual(output.status_code, 400)
data = json.loads(output.get_data(as_text=True))
self.assertDictEqual(
data,
{
"error": "Invalid | |
<reponame>t-sagara/jageocoder
from functools import lru_cache
import logging
import re
from typing import List, Optional, Union
from sqlalchemy import Column, ForeignKey, Integer, Float, String, Text
from sqlalchemy import or_
from sqlalchemy.orm import deferred
from sqlalchemy.orm import backref, relationship
from jageocoder.address import AddressLevel
from jageocoder.base import Base
from jageocoder.itaiji import converter as itaiji_converter
from jageocoder.result import Result
from jageocoder.strlib import strlib
logger = logging.getLogger(__name__)
class AddressNode(Base):
    """
    The address-node structure stored in 'node' table.

    Attributes
    ----------
    id : int
        The key identifier that is automatically sequentially numbered.
    name : str
        The name of the address element, such as '東京都' or '新宿区'
    name_index : str
        The standardized string for indexing created from its name.
    x : float
        X-coordinate value. (Longitude)
    y : float
        Y-coordinate value. (Latitude)
    level : int
        The level of the address element
        (see ``jageocoder.address.AddressLevel`` for the meaning of
        each value).
    note : string
        Note or comment.
    parent_id : int
        The id of the parent node.
    children : list of AddressNode
        The child nodes.
    """
    __tablename__ = 'node'
    # Primary key, sequentially numbered.
    id = Column(Integer, primary_key=True)
    # 'name' is deferred: loaded lazily, only when accessed.
    name = deferred(Column(String(256), nullable=False))
    # Standardized form of 'name', used for index lookups.
    name_index = Column(String(256), nullable=False)
    # Coordinates are deferred as well; both may be NULL.
    x = deferred(Column(Float, nullable=True))
    y = deferred(Column(Float, nullable=True))
    level = Column(Integer, nullable=True)
    note = deferred(Column(Text, nullable=True))
    # Self-referential tree: each node points at its parent in the same table.
    parent_id = Column(Integer, ForeignKey('node.id'), nullable=True)
    # 'children' is a dynamic relationship (a Query, not a list), with a
    # 'parent' backref on the child side.
    children = relationship(
        "AddressNode",
        cascade="all",
        backref=backref("parent", remote_side="AddressNode.id"),
        lazy="dynamic",
    )
def __init__(self, *args, **kwargs):
    """
    The initializer of the node.

    In addition to the initialization of the record,
    the name_index is also created.

    Notes
    -----
    The statement order matters: the declarative ``super().__init__``
    consumes the keyword arguments first, then the attributes below are
    (re)assigned explicitly.
    """
    super().__init__(*args, **kwargs)
    # Basic attributes; default to an empty name when not provided.
    self.name = kwargs.get('name', '')
    # Set extended attributes (x/y, level, note).
    self.set_attributes(**kwargs)
    # For indexing: derive the standardized search key from the name.
    self.name_index = itaiji_converter.standardize(self.name)
    # Relations
    self.parent_id = kwargs.get('parent_id', None)
def set_attributes(self, **kwargs):
    """Update extended attributes (x/y, level, note) from keyword values.

    Coordinates accept 'lon'/'lat' as aliases for 'x'/'y';
    'name' can't be modified here.
    """
    self.x = kwargs['x'] if 'x' in kwargs else kwargs.get('lon')
    self.y = kwargs['y'] if 'y' in kwargs else kwargs.get('lat')
    self.level = kwargs.get('level')
    self.note = kwargs.get('note')
def add_child(self, child):
    """Append *child* (an AddressNode) to this node's children."""
    self.children.append(child)
def add_to_parent(self, parent):
    """Attach this node as a child of *parent*.

    Assigning to ``self.parent`` relies on the 'parent' backref declared
    on the ``children`` relationship.
    """
    self.parent = parent
def get_child(self, target_name):
    """Return the child whose name or standardized name is *target_name*.

    Parameters
    ----------
    target_name : str
        The name (or standardized name) of the target node.

    Returns
    -------
    The matching child node, or None when no child matches.
    """
    name_matches = or_(
        AddressNode.name == target_name,
        AddressNode.name_index == target_name,
    )
    return self.children.filter(name_matches).one_or_none()
def search_child_with_criteria(self, pattern: str,
                               max_level: Optional[int] = None):
    """Return a query over children whose name_index matches *pattern*.

    Parameters
    ----------
    pattern : str
        SQL LIKE pattern applied to ``AddressNode.name_index``.
    max_level : int, optional
        When given, restrict results to nodes with ``level <= max_level``.

    Returns
    -------
    A SQLAlchemy query over the matching children, ordered by id.

    Notes
    -----
    The previous implementation decorated this method with
    ``functools.lru_cache``. An ``lru_cache`` on a bound method keys on
    ``self`` and therefore keeps every AddressNode (and its session
    state) alive for the lifetime of the shared cache (ruff rule B019).
    A small per-instance cache gives the same memoization without
    pinning unrelated nodes in memory.
    """
    cache = self.__dict__.setdefault('_child_criteria_cache', {})
    cache_key = (pattern, max_level)
    if cache_key in cache:
        return cache[cache_key]
    conds = []
    conds.append(AddressNode.name_index.like(pattern))
    logger.debug("  conds: name_index LIKE '{}'".format(pattern))
    if max_level is not None:
        conds.append(AddressNode.level <= max_level)
        logger.debug("  and level <= {}".format(max_level))
    filtered_children = self.children.filter(*conds).order_by(
        AddressNode.id)
    cache[cache_key] = filtered_children
    return filtered_children
def search_recursive(
self, index: str,
processed_nodes: Optional[List['AddressNode']] = None,
aza_skip: Union[str, bool, None] = None) -> List[Result]:
"""
Search nodes recursively that match the specified address notation.
Parameter
---------
index : str
The standardized address notation.
processed_nodes: List of AddressNode, optional
List of nodes that have already been processed
by TRIE search results
aza_skip: str, bool, optional
Specifies how to skip aza-names.
- Set to 'auto' or None to make the decision automatically
- Set to 'off' or False to not skip
- Set to 'on' or True to always skip
Return
------
A list of relevant AddressNode.
"""
l_optional_prefix = itaiji_converter.check_optional_prefixes(index)
optional_prefix = index[0: l_optional_prefix]
index = index[l_optional_prefix:]
if aza_skip in (None, ''):
aza_skip = 'auto'
elif aza_skip in (True, 'enable'):
aza_skip = 'on'
elif aza_skip in (False, 'disable'):
aza_skip = 'off'
logger.debug("node:{}, index:{}, optional_prefix:{}".format(
self, index, optional_prefix))
if len(index) == 0:
return [Result(self, optional_prefix, 0)]
max_level = None
v = strlib.get_number(index)
if v['i'] > 0:
# If it starts with a number,
# look for a node that matches the numeric part exactly.
substr = '{}.%'.format(v['n'])
else:
# If it starts with not a number,
# look for a node with a maching first letter.
substr = index[0:1] + '%'
if '字' in optional_prefix:
max_level = AddressLevel.AZA
filtered_children = self.search_child_with_criteria(
pattern=substr, max_level=max_level)
# Check if the index begins with an extra character of
# the current node.
if filtered_children.count() == 0 and \
index[0] in itaiji_converter.extra_characters:
logger.debug("Beginning with an extra character: {}".format(
index[0]))
candidates = self.search_recursive(
index[1:], processed_nodes, aza_skip)
if len(candidates) > 0:
new_candidates = []
for candidate in candidates:
new_candidate = Result(
candidate.node,
index[0] + candidate.matched,
l_optional_prefix + candidate.nchars)
new_candidates.append(new_candidate)
return new_candidates
return []
if logger.isEnabledFor(logging.DEBUG):
msg = 'No candidates. Children are; {}'.format(
','.join([x.name for x in self.children]))
logger.debug(msg)
candidates = []
for child in filtered_children:
if child in processed_nodes or []:
logger.debug("-> skipped; {}({})".format(
child.name, child.id))
continue
logger.debug("-> comparing; {}".format(child.name_index))
new_candidates = self._get_candidates_from_child(
child=child,
index=index,
optional_prefix=optional_prefix,
processed_nodes=processed_nodes,
aza_skip=aza_skip)
if len(new_candidates) > 0:
candidates += new_candidates
if self.level == AddressLevel.WARD and self.parent.name == '京都市':
# Street name (通り名) support in Kyoto City
# If a matching part of the search string is found in the
# child nodes, the part before the name is skipped
# as a street name.
for child in self.children:
pos = index.find(child.name_index)
if pos > 0:
offset = pos + len(child.name_index)
rest_index = index[offset:]
logger.debug(
"child:{} match {} chars".format(child, offset))
for cand in child.search_recursive(
rest_index,
processed_nodes, aza_skip):
candidates.append(
Result(cand[0],
optional_prefix +
index[0: offset] + cand[1],
l_optional_prefix +
len(child.name_index) + len(cand[1])
))
# Search for subnodes with queries excludes Aza-name candidates
if aza_skip == 'on' or \
(aza_skip == 'auto' and
self._is_aza_omission_target(processed_nodes)):
msg = "Checking Aza-name, current_node:{}, processed:{}"
logger.debug(msg.format(self, processed_nodes))
aza_positions = itaiji_converter.optional_aza_len(
index, 0)
if len(aza_positions) > 0:
for azalen in aza_positions:
msg = '"{}" in index "{}" can be optional.'
logger.debug(msg.format(index[:azalen], index))
# Note: Disable 'aza_skip' here not to perform
# repeated skip processing.
sub_candidates = self.search_recursive(
index[azalen:],
processed_nodes, aza_skip='off')
if sub_candidates[0].matched == '':
continue
for cand in sub_candidates:
if cand.node.level < AddressLevel.BLOCK and \
cand.node.name_index not in \
itaiji_converter.chiban_heads:
logger.debug("{} is ignored".format(
cand.node.name))
continue
candidates.append(Result(
cand.node,
optional_prefix +
index[0:azalen] + cand.matched,
l_optional_prefix + cand.nchars))
if len(candidates) == 0:
candidates = [Result(self, '', 0)]
logger.debug("node:{} returns {}".format(self.name, candidates))
return candidates
    def _get_candidates_from_child(
            self, child: 'AddressNode',
            index: str, optional_prefix: str,
            processed_nodes: List['AddressNode'],
            aza_skip: str) -> list:
        """
        Get candidates from the child.

        Parameters
        ----------
        child: AddressNode
            The starting child node.
        index: str
            Standardized query string. Numeric characters are kept as
            original notation.
        optional_prefix: str
            The option string that preceded the string passed by index.
        aza_skip: str
            Specifies how to skip aza-names.
            Options are 'auto', 'off', and 'on'.

        Returns
        -------
        list
            The list of candidates.
            Each element of the array has the matched AddressNode
            as the first element and the matched string
            as the second element.
        """
        # First attempt: direct match of the child's standardized name.
        match_len = itaiji_converter.match_len(index, child.name_index)
        if match_len == 0:
            # Second attempt: retry with the child's optional postfix
            # stripped (e.g. '番', '丁目').
            l_optional_postfix = itaiji_converter.check_optional_postfixes(
                child.name_index, child.level)
            if l_optional_postfix > 0:
                # In case the index string of the child node with optional
                # postfixes removed is completely included in the beginning
                # of the search string.
                # ex. index='2.-8.', child.name_index='2.番' ('番' is a postfix)
                optional_postfix = child.name_index[-l_optional_postfix:]
                alt_child_index = child.name_index[0: -l_optional_postfix]
                logger.debug(
                    "child:{} has optional postfix {}".format(
                        child, optional_postfix))
                match_len = itaiji_converter.match_len(
                    index, alt_child_index, removed_postfix=optional_postfix)
                if match_len < len(index) and index[match_len] in '-ノ':
                    # Also consume a separator right after the match.
                    match_len += 1

        if match_len == 0 and child.name_index.endswith('.条'):
            # Support for Sapporo City and other cities that use
            # "北3西1" instead of "北3条西1丁目".
            alt_child_index = child.name_index.replace('条', '', 1)
            logger.debug("child:{} ends with '.条'".format(child))
            match_len = itaiji_converter.match_len(index, alt_child_index)

        if match_len == 0:
            # No variant matched; this child yields no candidates.
            logger.debug("{} doesn't match".format(child.name))
            return []

        # Recurse into the child with the unmatched remainder, prepending
        # the matched portion to each returned candidate.
        candidates = []
        offset = match_len
        rest_index = index[offset:]
        l_optional_prefix = len(optional_prefix)
        logger.debug("child:{} match {} chars".format(child, offset))
        for cand in child.search_recursive(
                index=rest_index,
                processed_nodes=processed_nodes,
                aza_skip=aza_skip):
            candidates.append(Result(
                cand.node,
                optional_prefix + index[0:match_len] + cand.matched,
                l_optional_prefix + match_len + cand.nchars))

        return candidates
def _is_aza_omission_target(
self, processed_nodes: List['AddressNode']) -> bool:
"""
Determine if this node is a target of aza-name omission.
Parameters
----------
processed_nodes: List of AddressNode
List of nodes that have already been processed
by TRIE search results
Returns
-------
bool
True if this node is a target of aza-name ommission.
Otherwise False.
Notes
-----
Sibling and parent nodes of nodes whose | |
beef
7932: [], # Chicken breast, fat-free, mesquite flavor, sliced
7933: [], # Chicken breast, oven-roasted, fat-free, sliced
7934: ["Kielbasa"], # Kielbasa, Polish, turkey and beef, smoked
7935: [], # Chicken breast, roll, oven-roasted
7936: [], # Bologna, pork and turkey, lite
7937: [], # Bologna, pork, turkey and beef
7938: [], # Ham, honey, smoked, cooked
7939: [], # Frankfurter, pork
7940: [], # Macaroni and cheese loaf, chicken, pork and beef
7941: [], # Salami, Italian, pork and beef, dry, sliced, 50% less sodium
7942: [], # Pate, truffle flavor
7943: [], # Turkey, breast, smoked, lemon pepper flavor, 97% fat-free
7944: [], # Turkey, white, rotisserie, deli cut
7945: [], # Frankfurter, beef, heated
7949: [], # Frankfurter, meat, heated
7950: [], # Frankfurter, meat
7951: [], # Scrapple, pork
7952: [], # Bologna, chicken, turkey, pork
7953: [], # Pork sausage, link/patty, fully cooked, microwaved
7954: [], # Sausage, breakfast sausage, beef, pre-cooked, unprepared
7955: [], # Sausage, turkey, fresh, raw
7956: [], # Sausage, beef, fresh, cooked
7957: [], # Sausage, pork and turkey, pre-cooked
7958: [], # Sausage, turkey, fresh, cooked
7959: [], # Bologna, chicken, pork, beef
7960: [], # Bologna, chicken, pork
7961: [], # Chicken breast, deli, rotisserie seasoned, sliced, prepackaged
7962: [], # Frankfurter, meat and poultry, unheated
7963: [], # Frankfurter, meat and poultry, cooked, boiled
7964: [], # Frankfurter, meat and poultry, cooked, grilled
7965: [], # Pork sausage, link/patty, reduced fat, unprepared
7966: [], # Pork sausage, link/patty, reduced fat, cooked, pan-fried
7967: [], # Pork sausage, link/patty, fully cooked, unheated
7968: [], # Kielbasa, fully cooked, grilled
7969: [], # Kielbasa, fully cooked, pan-fried
7970: [], # Kielbasa, fully cooked, unheated
7971: [], # Bologna, meat and poultry
7972: [], # Meatballs, frozen, Italian style
7973: [], # Bacon, turkey, microwaved
7974: [], # Bacon, turkey, low sodium
7976: [], # Sausage, chicken or turkey, Italian style, lower sodium
7977: [], # Ham, smoked, extra lean, low sodium
7978: [], # Pork sausage, reduced sodium, cooked
7979: [], # Sausage, pork, turkey, and beef, reduced sodium
8002: [], # Cereals ready-to-eat, POST, ALPHA-BITS
8010: [], # Cereals ready-to-eat, QUAKER, CAP'N CRUNCH
8011: [], # Cereals ready-to-eat, QUAKER, CAP'N CRUNCH with CRUNCHBERRIES
8012: [], # Cereals ready-to-eat, QUAKER, CAP'N CRUNCH'S PEANUT BUTTER CRUNCH
8013: [], # Cereals ready-to-eat, GENERAL MILLS, CHEERIOS
8015: [], # Cereals ready-to-eat, POST, COCOA PEBBLES
8018: [], # Cereals ready-to-eat, QUAKER, QUAKER CRUNCHY BRAN
8025: [], # Cereals ready-to-eat, RALSTON CRISP RICE
8029: [], # Cereals ready-to-eat, POST Bran Flakes
8034: [], # Cereals ready-to-eat, POST, FRUITY PEBBLES
8037: ["Granola cereal"], # Cereals ready-to-eat, granola, homemade
8038: [], # Cereals ready-to-eat, POST, GRAPE-NUTS Cereal
8039: [], # Cereals ready-to-eat, POST, GRAPE-NUTS Flakes
8046: ["Honeycomb cereal"], # Cereals ready-to-eat, POST, Honeycomb Cereal
8047: [], # Cereals ready-to-eat, QUAKER, KING VITAMAN
8049: [], # Cereals ready-to-eat, QUAKER, QUAKER OAT LIFE, plain
8054: [], # Cereals ready-to-eat, QUAKER, 100% Natural Granola, Oats, Wheat and Honey
8059: [], # Cereals ready-to-eat, QUAKER, SWEET CRUNCH/QUISP
8061: [], # Cereals ready-to-eat, POST Raisin Bran Cereal
8066: [], # Cereals ready-to-eat, QUAKER, QUAKER Puffed Rice
8073: [], # Cereals ready-to-eat, POST, GOLDEN CRISP
8074: [], # Cereals ready-to-eat, RALSTON TASTEEOS
8081: [], # Cereals ready-to-eat, POST, Honey Nut Shredded Wheat
8083: [], # Cereals ready-to-eat, MALT-O-MEAL, CORN BURSTS
8084: ["Wheat germ cereal"], # Cereals ready-to-eat, wheat germ, toasted, plain
8085: [], # Cereals ready-to-eat, SUN COUNTRY, KRETSCHMER Honey Crunch Wheat Germ
8090: [], # Cereals, corn grits, white, regular and quick, enriched, dry
8091: [], # Cereals, corn grits, white, regular and quick, enriched, cooked with water, without salt
8092: [], # Cereals, QUAKER, corn grits, instant, plain, dry
8093: [], # Cereals, QUAKER, corn grits, instant, plain, prepared (microwaved or boiling water added), without salt
8094: [], # Cereals, QUAKER, corn grits, instant, cheddar cheese flavor, dry
8096: [], # Cereals, QUAKER, Instant Grits, Country Bacon flavor, dry
8100: [], # Cereals, CREAM OF RICE, dry
8102: [], # Cereals, CREAM OF WHEAT, regular, 10 minute cooking, dry
8103: [], # Cereals, CREAM OF WHEAT, regular (10 minute), cooked with water, without salt
8104: [], # Cereals, farina, enriched, assorted brands including CREAM OF WHEAT, quick (1-3 minutes), dry
8105: [], # Cereals, farina, enriched, assorted brands including CREAM OF WHEAT, quick (1-3 minutes), cooked with water, without salt
8106: [], # Cereals, CREAM OF WHEAT, instant, dry
8107: [], # Cereals, CREAM OF WHEAT, instant, prepared with water, without salt
8116: [], # Cereals, MALT-O-MEAL, original, plain, dry
8120: ["Oat cereal"], # Cereals, oats, regular and quick, not fortified, dry
8121: [], # Cereals, oats, regular and quick, unenriched, cooked with water (includes boiling and microwaving), without salt
8122: [], # Cereals, oats, instant, fortified, plain, dry
8123: [], # Cereals, oats, instant, fortified, plain, prepared with water (boiling water added or microwaved)
8124: [], # Cereals, QUAKER, Instant Oatmeal, apples and cinnamon, dry
8128: [], # Cereals, oats, instant, fortified, with cinnamon and spice, dry
8129: [], # Cereals, oats, instant, fortified, with cinnamon and spice, prepared with water
8130: [], # Cereals, QUAKER, Instant Oatmeal, maple and brown sugar, dry
8133: [], # Cereals, oats, instant, fortified, with raisins and spice, prepared with water
8138: [], # Cereals ready-to-eat, MALT-O-MEAL, MARSHMALLOW MATEYS
8142: [], # Cereals, WHEATENA, dry
8143: [], # Cereals, WHEATENA, cooked with water
8144: [], # Cereals, whole wheat hot natural cereal, dry
8145: [], # Cereals, whole wheat hot natural cereal, cooked with water, without salt
8146: [], # Cereals ready-to-eat, QUAKER, QUAKER Puffed Wheat
8147: [], # Cereals ready-to-eat, POST, Shredded Wheat, original big biscuit
8148: [], # Cereals ready-to-eat, POST, Shredded Wheat, original spoon-size
8156: [], # Cereals ready-to-eat, rice, puffed, fortified
8157: [], # Cereals ready-to-eat, wheat, puffed, fortified
8160: [], # Cereals, corn grits, yellow, regular and quick, unenriched, dry
8161: [], # Cereals, corn grits, white, regular and quick, enriched, cooked with water, with salt
8164: [], # Cereals, corn grits, yellow, regular and quick, enriched, cooked with water, without salt
8165: [], # Cereals, corn grits, yellow, regular, quick, enriched, cooked with water, with salt
8168: [], # Cereals, CREAM OF RICE, cooked with water, with salt
8169: [], # Cereals, CREAM OF WHEAT, regular (10 minute), cooked with water, with salt
8172: [], # Cereals, farina, unenriched, dry
8173: [], # Cereals, farina, enriched, cooked with water, with salt
8177: [], # Cereals, MALT-O-MEAL, chocolate, dry
8180: [], # Cereals, oats, regular and quick and instant, unenriched, cooked with water (includes boiling and microwaving), with salt
8182: [], # Cereals, WHEATENA, cooked with water, with salt
8183: [], # Cereals, whole wheat hot natural cereal, cooked with water, with salt
8191: [], # Cereals ready-to-eat, POST, Shredded Wheat, lightly frosted, spoon-size
8192: [], # Cereals ready-to-eat, POST SELECTS Blueberry Morning
8200: [], # Cereals, QUAKER, QUAKER MultiGrain Oatmeal, dry
8204: [], # Cereals ready-to-eat, chocolate-flavored frosted puffed corn
8206: [], # Cereals ready-to-eat, MALT-O-MEAL, COCO-ROOS
8210: [], # Cereals ready-to-eat, QUAKER, QUAKER OAT CINNAMON LIFE
8211: [], # Cereals ready-to-eat, QUAKER, HONEY GRAHAM OH!S
8214: [], # Cereals ready-to-eat, QUAKER, Oatmeal Squares
8215: [], # Cereals ready-to-eat, QUAKER, Oatmeal Squares, cinnamon
8216: [], # Cereals ready-to-eat, QUAKER, Toasted Multigrain Crisps
8218: [], # Cereals ready-to-eat, QUAKER, QUAKER 100% Natural Granola with Oats, Wheat, Honey, and Raisins
8220: [], # Cereals ready-to-eat, QUAKER, Low Fat 100% Natural Granola with Raisins
8221: [], # Cereals, QUAKER, Instant Grits, Butter flavor, dry
8225: [], # Cereals, QUAKER, Instant Oatmeal, fruit and cream variety, dry
8228: [], # Cereals, QUAKER, Instant Oatmeal, raisins, dates and walnuts, dry
8231: [], | |
"""network:
epanet file: Net3.inp
scenario:
scenario file: Net3.tsg
merlion: false
detection: [1, 2, 3]
scenarios:
-
scenario file: Net3.tsg
merlion: false
detection: [1, 2, 3]
-
scenario file: Net3.tsg
merlion: true
detection: []
nodes: []
impact:
metric: MC
flushing:
flush nodes:
feasible nodes: ALL
infeasible nodes: NONE
max nodes: 2
rate: 600.0
response time: 60.0
duration: 600.0
close valves:
feasible pipes: ALL
infeasible pipes: NONE
max pipes: 2
response time: 60.0
"""
self.config['scenarios'].append()
self.config['scenarios'].append({'merlion': True, 'detection': []})
test = _display(self.config)
sys.stdout.write(test)
self.assertEqual(test, reference)
    def test_display_userdata_default(self):
        """An untouched config yields empty 'userdata' display output."""
        test = _display(self.config, 'userdata')
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_display_userdata_list(self):
        """An appended default list item appears in 'userdata' output."""
        self.config['scenarios'].append()
        test = _display(self.config, 'userdata')
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios:
  -
""")
    def test_display_userdata_list_nonDefault(self):
        """Non-default list-item values are shown in 'userdata' output."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        test = _display(self.config, 'userdata')
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios:
  -
  -
    merlion: true
    detection: []
""")
    def test_display_userdata_block(self):
        """Entries added via add() do not show up as 'userdata'."""
        self.config.add("foo", ConfigValue(0, int, None, None))
        self.config.add("bar", ConfigBlock())
        test = _display(self.config, 'userdata')
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_display_userdata_block_nonDefault(self):
        """A sub-block added to an implicit block shows in 'userdata'."""
        self.config.add("foo", ConfigValue(0, int, None, None))
        self.config.add("bar", ConfigBlock(implicit=True)) \
            .add("baz", ConfigBlock())
        test = _display(self.config, 'userdata')
        sys.stdout.write(test)
        self.assertEqual(test, "bar:\n")
    def test_unusedUserValues_default(self):
        """An untouched config has no unused user values."""
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_unusedUserValues_scalar(self):
        """A user-set scalar that was never read is reported unused."""
        self.config['scenario']['merlion'] = True
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "scenario.merlion")
    def test_unusedUserValues_list(self):
        """An appended list item is reported unused along with its list."""
        self.config['scenarios'].append()
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]""")
    def test_unusedUserValues_list_nonDefault(self):
        """Unread non-default list-item values are each reported unused."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]
scenarios[1]
scenarios[1].merlion
scenarios[1].detection""")
    def test_unusedUserValues_list_nonDefault_listAccessed(self):
        """Iterating the list marks only the list itself as accessed."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        for x in self.config['scenarios']:
            pass
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios[0]
scenarios[1]
scenarios[1].merlion
scenarios[1].detection""")
    def test_unusedUserValues_list_nonDefault_itemAccessed(self):
        """Reading one item removes it (and its parents) from the unused set."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        self.config['scenarios'][1]['merlion']
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios[0]
scenarios[1].detection""")
    def test_unusedUserValues_topBlock(self):
        """add() at the top level does not create unused user values."""
        self.config.add('foo', ConfigBlock())
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_unusedUserValues_subBlock(self):
        """add() on a sub-block reports the block and new entry unused."""
        self.config['scenario'].add('foo', ConfigBlock())
        test = '\n'.join(x.name(True) for x in self.config.unused_user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenario
scenario.foo""")
    def test_UserValues_default(self):
        """An untouched config has no user values at all."""
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_UserValues_scalar(self):
        """A user-set scalar is reported by user_values()."""
        self.config['scenario']['merlion'] = True
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "scenario.merlion")
    def test_UserValues_list(self):
        """Appending a list item records the list and item as user values."""
        self.config['scenarios'].append()
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]""")
    def test_UserValues_list_nonDefault(self):
        """Non-default item values are all recorded as user values."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]
scenarios[1]
scenarios[1].merlion
scenarios[1].detection""")
    def test_UserValues_list_nonDefault_listAccessed(self):
        """Accessing the list does not remove entries from user_values()."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        for x in self.config['scenarios']:
            pass
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]
scenarios[1]
scenarios[1].merlion
scenarios[1].detection""")
    def test_UserValues_list_nonDefault_itemAccessed(self):
        """Accessing an item does not remove entries from user_values()."""
        self.config['scenarios'].append()
        self.config['scenarios'].append({'merlion': True, 'detection': []})
        self.config['scenarios'][1]['merlion']
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenarios
scenarios[0]
scenarios[1]
scenarios[1].merlion
scenarios[1].detection""")
    def test_UserValues_topBlock(self):
        """add() at the top level does not count as a user value."""
        self.config.add('foo', ConfigBlock())
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, "")
    def test_UserValues_subBlock(self):
        """add() on a sub-block records the block and entry as user values."""
        self.config['scenario'].add('foo', ConfigBlock())
        test = '\n'.join(x.name(True) for x in self.config.user_values())
        sys.stdout.write(test)
        self.assertEqual(test, """scenario
scenario.foo""")
def test_parseDisplayAndValue_default(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
test = _display(self.config)
sys.stdout.write(test)
self.assertEqual(yaml.load(test), self.config.value())
def test_parseDisplayAndValue_list(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
self.config['scenarios'].append()
self.config['scenarios'].append({'merlion': True, 'detection': []})
test = _display(self.config)
sys.stdout.write(test)
self.assertEqual(yaml.load(test), self.config.value())
def test_parseDisplay_userdata_default(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
test = _display(self.config, 'userdata')
sys.stdout.write(test)
self.assertEqual(yaml.load(test), None)
def test_parseDisplay_userdata_list(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
self.config['scenarios'].append()
test = _display(self.config, 'userdata')
sys.stdout.write(test)
self.assertEqual(yaml.load(test), {'scenarios': [None]})
def test_parseDisplay_userdata_list_nonDefault(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
self.config['scenarios'].append()
self.config['scenarios'].append({'merlion': True, 'detection': []})
test = _display(self.config,'userdata')
sys.stdout.write(test)
self.assertEqual(
yaml.load(test), {'scenarios':
[None, {'merlion': True,
'detection': []}]})
def test_parseDisplay_userdata_block(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
self.config.add("foo", ConfigValue(0, int, None, None))
self.config.add("bar", ConfigBlock())
test = _display(self.config, 'userdata')
sys.stdout.write(test)
self.assertEqual(yaml.load(test), None)
def test_parseDisplay_userdata_block_nonDefault(self):
if not using_yaml:
self.skipTest("Cannot execute test because PyYAML is not available")
self.config.add("foo", ConfigValue(0, int, None, None))
self.config.add("bar", ConfigBlock(implicit=True)) \
.add("baz", ConfigBlock())
test = _display(self.config, 'userdata')
sys.stdout.write(test)
self.assertEqual(yaml.load(test), {'bar': None})
    def test_value_ConfigValue(self):
        """Scalar access returns the domain-cast (float) default value."""
        val = self.config['flushing']['flush nodes']['rate']
        self.assertIs(type(val), float)
        self.assertEqual(val, 600.0)
    def test_value_ConfigList_empty(self):
        """An empty ConfigList's value() is a plain empty list."""
        val = self.config['nodes'].value()
        self.assertIs(type(val), list)
        self.assertEqual(val, [])
    def test_value_ConfigList_simplePopulated(self):
        """Appended items are domain-cast; a bare append() uses the default."""
        self.config['nodes'].append('1')
        self.config['nodes'].append(3)
        self.config['nodes'].append()
        val = self.config['nodes'].value()
        self.assertIs(type(val), list)
        self.assertEqual(len(val), 3)
        self.assertEqual(val, [1, 3, 0])
    def test_value_ConfigList_complexPopulated(self):
        """A default-appended block item materializes its full default dict."""
        self.config['scenarios'].append()
        val = self.config['scenarios'].value()
        self.assertIs(type(val), list)
        self.assertEqual(len(val), 1)
        self.assertEqual(val, [{'detection': [1, 2, 3],
                                'merlion': False,
                                'scenario file': 'Net3.tsg'}])
    def test_name(self):
        """name() returns the local (unqualified) name of each entry."""
        self.config['scenarios'].append()
        self.assertEqual(self.config.name(), "")
        self.assertEqual(self.config['scenarios'].name(), "scenarios")
        self.assertEqual(self.config['scenarios'][0].name(), "[0]")
        self.assertEqual(self.config['scenarios'][0].get('merlion').name(),
                         "merlion")
    def test_name_fullyQualified(self):
        """name(True) returns the dotted, fully-qualified name."""
        self.config['scenarios'].append()
        self.assertEqual(self.config.name(True), "")
        self.assertEqual(self.config['scenarios'].name(True), "scenarios")
        self.assertEqual(self.config['scenarios'][0].name(True), "scenarios[0]")
        self.assertEqual(self.config['scenarios'][0].get('merlion').name(True),
                         "scenarios[0].merlion")
    def test_setValue_scalar(self):
        """Assigned scalars are cast into the declared (float) domain."""
        self.config['flushing']['flush nodes']['rate'] = 50
        val = self.config['flushing']['flush nodes']['rate']
        self.assertIs(type(val), float)
        self.assertEqual(val, 50.0)
def test_setValue_scalar_badDomain(self):
try:
self.config['flushing']['flush nodes']['rate'] = 'a'
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
val = self.config['flushing']['flush nodes']['rate']
self.assertIs(type(val), float)
self.assertEqual(val, 600.0)
    def test_setValue_scalarList_empty(self):
        """Assigning [] to a scalar-list entry clears it."""
        self.config['scenario']['detection'] = []
        val = self.config['scenario']['detection']
        self.assertIs(type(val), list)
        self.assertEqual(val, [])
    def test_setValue_scalarList_withvalue(self):
        """Assigning a list to a scalar-list entry replaces its contents."""
        self.config['scenario']['detection'] = [6]
        val = self.config['scenario']['detection']
        self.assertIs(type(val), list)
        self.assertEqual(val, [6])
def test_setValue_scalarList_badDomain(self):
try:
self.config['scenario']['detection'] = 50
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
val = self.config['scenario']['detection']
self.assertIs(type(val), list)
self.assertEqual(val, [1, 2, 3])
def test_setValue_scalarList_badSubDomain(self):
try:
self.config['scenario']['detection'] = [5.5, 'a']
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
val = self.config['scenario']['detection']
self.assertIs(type(val), list)
self.assertEqual(val, [1, 2, 3])
    def test_setValue_list_scalardomain_list(self):
        """Assigning a list replaces the ConfigList contents element-wise."""
        self.config['nodes'] = [5, 10]
        val = self.config['nodes'].value()
        self.assertIs(type(val), list)
        self.assertEqual(val, [5, 10])
    def test_setValue_list_scalardomain_scalar(self):
        """Assigning a bare scalar to a list is wrapped as a one-item list."""
        self.config['nodes'] = 10
        val = self.config['nodes'].value()
        self.assertIs(type(val), list)
        self.assertEqual(val, [10])
def test_setValue_list_badSubDomain(self):
try:
self.config['nodes'] = [5, 'a']
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
val = self.config['nodes'].value()
self.assertIs(type(val), list)
self.assertEqual(val, [])
    def test_setValue_block_none(self):
        """Assigning None to a block is a no-op that keeps prior values."""
        ref = self._reference['scenario']
        self.config['scenario'] = None
        self.assertEqual(ref, self.config['scenario'].value())
        self.config['scenario']['merlion'] = True
        ref['merlion'] = True
        self.assertEqual(ref, self.config['scenario'].value())
        # A second None assignment must not reset the user-set value.
        self.config['scenario'] = None
        self.assertEqual(ref, self.config['scenario'].value())
    def test_setValue_block_empty(self):
        """Assigning {} to a block is a no-op that keeps prior values."""
        ref = self._reference['scenario']
        self.config['scenario'] = {}
        self.assertEqual(ref, self.config['scenario'].value())
        self.config['scenario']['merlion'] = True
        ref['merlion'] = True
        self.assertEqual(ref, self.config['scenario'].value())
        # A second {} assignment must not reset the user-set value.
        self.config['scenario'] = {}
        self.assertEqual(ref, self.config['scenario'].value())
    def test_setValue_block_simplevalue(self):
        """Assigning a dict to a block updates only the given keys."""
        _test = {'merlion': True, 'detection': [1]}
        ref = self._reference['scenario']
        ref.update(_test)
        self.config['scenario'] = _test
        self.assertEqual(ref, self.config['scenario'].value())
    def test_setItem_block_implicit(self):
        """Item assignment on an implicit block creates new entries."""
        ref = self._reference
        ref['foo'] = 1
        self.config['foo'] = 1
        self.assertEqual(ref, self.config.value())
        ref['bar'] = 1
        self.config['bar'] = 1
        self.assertEqual(ref, self.config.value())
    def test_setItem_block_implicit_domain(self):
        """Implicit entries are cast by the block's implicit domain (str)."""
        ref = self._reference['scenario']
        ref['foo'] = '1'
        self.config['scenario']['foo'] = 1
        self.assertEqual(ref, self.config['scenario'].value())
        ref['bar'] = '1'
        self.config['scenario']['bar'] = 1
        self.assertEqual(ref, self.config['scenario'].value())
def test_setValue_block_noImplicit(self):
_test = {'epanet file': 'no_file.inp', 'foo': 1}
try:
self.config['network'] = _test
except ValueError:
pass
except:
raise
else:
self.fail("Expected test to raise ValueError")
self.assertEqual(self._reference, self.config.value())
    def test_setValue_block_implicit(self):
        """set_value() on an implicit top block accepts new keys."""
        _test = {'scenario': {'merlion': True, 'detection': [1]}, 'foo': 1}
        ref = self._reference
        ref['scenario'].update(_test['scenario'])
        ref['foo'] = 1
        self.config.set_value(_test)
        self.assertEqual(ref, self.config.value())
        _test = {'scenario': {'merlion': True, 'detection': [1]}, 'bar': 1}
        ref['bar'] = 1
        self.config.set_value(_test)
        self.assertEqual(ref, self.config.value())
    def test_setValue_block_implicit_domain(self):
        """Implicit keys in a dict assignment are cast by the implicit domain."""
        _test = {'merlion': True, 'detection': [1], 'foo': 1}
        ref = self._reference['scenario']
        ref.update(_test)
        ref['foo'] = '1'
        self.config['scenario'] = _test
        self.assertEqual(ref, self.config['scenario'].value())
        _test = {'merlion': True, 'detection': [1], 'bar': '1'}
        ref['bar'] = '1'
        self.config['scenario'] = _test
        self.assertEqual(ref, self.config['scenario'].value())
def test_setValue_block_badDomain(self):
_test = {'merlion': True, 'detection': ['a'], 'foo': 1, 'a': 1}
try:
self.config['scenario'] = _test
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
self.assertEqual(self._reference, self.config.value())
try:
self.config['scenario'] = []
except ValueError:
pass
except:
raise
else:
self.fail('expected test to raise ValueError')
self.assertEqual(self._reference, self.config.value())
def test_default_function(self):
c = ConfigValue(default=lambda: 10, domain=int)
self.assertEqual(c.value(), 10)
c.set_value(5)
self.assertEqual(c.value(), 5)
c.reset()
self.assertEqual(c.value(), 10)
try:
c = ConfigValue(default=lambda x: 10 * x, domain=int)
except TypeError:
pass
else:
self.fail("Expected type error")
try:
c = ConfigValue('a', domain=int)
except ValueError:
pass
except:
raise
else:
self.fail("Expected casting a to int to raise a value error")
    def test_getItem_setItem(self):
        """Access/assignment track _userAccessed and _userSet correctly."""
        # a freshly-initialized object should not be accessed
        self.assertFalse(self.config._userAccessed)
        self.assertFalse(self.config._data['scenario']._userAccessed)
        self.assertFalse(self.config._data['scenario']._data['detection']\
                         ._userAccessed)
        # Getting a ConfigValue should not access it
        self.assertFalse(self.config['scenario'].get('detection')._userAccessed)
        #... but should access the parent blocks traversed to get there
        self.assertTrue(self.config._userAccessed)
        self.assertTrue(self.config._data['scenario']._userAccessed)
        self.assertFalse(self.config._data['scenario']._data['detection']\
                         ._userAccessed)
        # a freshly-initialized object should not be set
        self.assertFalse(self.config._userSet)
        self.assertFalse(self.config._data['scenario']._userSet)
        self.assertFalse(self.config['scenario']._data['detection']._userSet)
        # setting a value should map it to the correct domain
        self.assertEqual(self.config['scenario']['detection'], [1, 2, 3])
        self.config['scenario']['detection'] = [42.5]
        self.assertEqual(self.config['scenario']['detection'], [42])
        # setting a ConfigValue should mark it as userSet, but NOT any parent blocks
        self.assertFalse(self.config._userSet)
        self.assertFalse(self.config._data['scenario']._userSet)
        self.assertTrue(self.config['scenario'].get('detection')._userSet)
def test_delitem(self):
    """Both declared and implicitly-created entries can be deleted."""
    cfg = ConfigBlock(implicit=True)
    cfg.declare('bar', ConfigValue())
    self.assertEqual(sorted(cfg.keys()), ['bar'])
    # implicit=True allows creating an entry by plain attribute assignment
    cfg.foo = 5
    self.assertEqual(sorted(cfg.keys()), ['bar', 'foo'])
    # remove the implicit entry first, then the declared one
    del cfg['foo']
    self.assertEqual(sorted(cfg.keys()), ['bar'])
    del cfg['bar']
    self.assertEqual(sorted(cfg.keys()), [])
def test_generate_documentation(self):
    """Generated documentation must match the baseline reference file."""
    out_file = os.path.join(currdir, 'test_reference.out')
    text = self.config.generate_documentation()
    # Use a context manager so the handle is closed on every path
    # (the original left the file open if the write raised).
    with open(out_file, 'w') as f:
        f.write(text)
    print(text)
    # compare_file returns a (differs, ...) tuple; [0] is falsy on a match
    self.assertFalse(
        pyutilib.misc.comparison.compare_file(out_file,
                                              out_file[:-4] + '.txt')[0])
    os.remove(out_file)
def test_generate_custom_documentation(self):
reference = \
"""startBlock{}
startItem{network}
endItem{network}
startBlock{network}
startItem{epanet file}
item{EPANET network inp file}
endItem{epanet file}
endBlock{network}
startItem{scenario}
item{Single scenario block}
endItem{scenario}
startBlock{scenario}
startItem{scenario file}
item{This | |
"""Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
python3 CLI utility for interacting with Network Elements using gNMI.
This utility can be utilized as a reference, or standalone utility, for
interacting with Network Elements which support OpenConfig and gNMI.
Current supported gNMI features:
- GetRequest
- SetRequest (Update, Replace, Delete)
- Target hostname override
- Auto-loads Target cert from Target if not specified
- User/password based authentication
- Certifificate based authentication
Current unsupported gNMI features:
- Subscribe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import re
import ssl
import sys
import six
try:
import gnmi_pb2
except ImportError:
print('ERROR: Ensure you\'ve installed dependencies from requirements.txt\n'
'eg, pip install -r requirements.txt')
import gnmi_pb2_grpc
__version__ = '0.4'
_RE_PATH_COMPONENT = re.compile(r'''
^
(?P<pname>[^[]+) # gNMI path name
(\[(?P<key>\w+) # gNMI path key
=
(?P<value>.*) # gNMI path value
\])?$
''', re.VERBOSE)
class Error(Exception):
    """Module-level Exception class; base for all errors raised here."""


class XpathError(Error):
    """Error parsing xpath provided."""


class ValError(Error):
    """Error parsing provided val from CLI."""


class JsonReadError(Error):
    """Error parsing provided JSON file."""


class FindTypeError(Error):
    """Error identifying type of provided value."""
def _create_parser():
"""Create parser for arguments passed into the program from the CLI.
Returns:
Argparse object.
"""
parser = argparse.ArgumentParser(description='gNMI CLI utility.')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, epilog='\nExample'
' GetRequest without user/password and over-riding Target certificate CN:'
'\npython py_gnmicli.py -t 127.0.0.1 -p 8080 -x \'/access-points/'
'access-point[hostname=test-ap]/\' -rcert ~/certs/target-cert.crt -o '
'openconfig.example.com')
parser.add_argument('-t', '--target', type=str, help='The gNMI Target',
required=True)
parser.add_argument('-p', '--port', type=str, help='The port the gNMI Target '
'is listening on', required=True)
parser.add_argument('-user', '--username', type=str, help='Username to use'
'when establishing a gNMI Channel to the Target',
required=False)
parser.add_argument('-pass', '--password', type=str, help='Password to use'
'when establishing a gNMI Channel to the Target',
required=False)
parser.add_argument('-m', '--mode', choices=[
'get', 'set-update', 'set-replace', 'set-delete', 'subscribe'], help=
'Mode of operation when interacting with network element.'
' Default=get. If set, it can be either value \nor JSON '
'file (prepend filename with "@")', default='get')
parser.add_argument('-val', '--value', type=str, help='Value for SetRequest.'
'\nCan be Leaf value or JSON file. If JSON file, prepend'
' with "@"; eg "@interfaces.json".',
required=False)
parser.add_argument('-pkey', '--private_key', type=str, help='Fully'
'quallified path to Private key to use when establishing'
'a gNMI Channel to the Target', required=False)
parser.add_argument('-rcert', '--root_cert', type=str, help='Fully quallified'
'Path to Root CA to use when building the gNMI Channel',
required=False)
parser.add_argument('-cchain', '--cert_chain', type=str, help='Fully'
'quallified path to Certificate chain to use when'
'establishing a gNMI Channel to the Target', default=None,
required=False)
parser.add_argument('-g', '--get_cert', help='Obtain certificate from gNMI '
'Target when establishing secure gRPC channel.',
required=False, action='store_true')
parser.add_argument('-x', '--xpath', type=str, help='The gNMI path utilized'
'in the GetRequest or Subscirbe', required=True)
parser.add_argument('-o', '--host_override', type=str, help='Use this as '
'Targets hostname/peername when checking it\'s'
'certificate CN. You can check the cert with:\nopenssl '
'x509 -in certificate.crt -text -noout', required=False)
parser.add_argument('-f', '--format', type=str, action='store', help='Format '
'of the GetResponse to be printed. Default=JSON.',
choices=['json', 'protobuff'], default='json',
required=False)
parser.add_argument('-V', '--version', help='Print program version',
action='store_true', required=False)
parser.add_argument('-d', '--debug', help='Enable gRPC debugging',
required=False, action='store_true')
parser.add_argument('-n', '--notls', help='gRPC insecure mode',
required=False, action='store_true')
return parser
def _path_names(xpath):
"""Parses the xpath names.
This takes an input string and converts it to a list of gNMI Path names. Those
are later turned into a gNMI Path Class object for use in the Get/SetRequests.
Args:
xpath: (str) xpath formatted path.
Returns:
list of gNMI path names.
"""
if not xpath or xpath == '/': # A blank xpath was provided at CLI.
return []
return xpath.strip().strip('/').split('/') # Remove leading and trailing '/'.
def _parse_path(p_names):
    """Build a gNMI Path object from a list of path element names.

    Args:
      p_names: (list) of path elements, which may include [key=value] keys.

    Returns:
      a gnmi_pb2.Path object representing gNMI path elements.

    Raises:
      XpathError: a path component could not be parsed.
    """
    elems = []
    for component in p_names:
        match = _RE_PATH_COMPONENT.search(component)
        if match is None:  # Invalid path specified.
            raise XpathError('xpath component parse error: %s' % component)
        if match.group('key') is None:
            # No [key=value] selector on this element.
            elems.append(gnmi_pb2.PathElem(name=component, key={}))
        else:
            # Collect every [key=value] selector present on this component.
            keys = {}
            for kv in re.findall(r'\[([^]]*)\]', component):
                parts = kv.split('=')
                keys[parts[0]] = parts[-1]
            elems.append(gnmi_pb2.PathElem(name=match.group('pname'),
                                           key=keys))
    return gnmi_pb2.Path(elem=elems)
def _create_stub(creds, target, port, host_override):
    """Create a gNMI Stub over a secure or insecure gRPC channel.

    Args:
      creds: (object) gNMI Credentials used to build the secure channel, or a
        falsy value for an insecure channel.
      target: (str) gNMI Target.
      port: (str) gNMI Target IP port.
      host_override: (str) Hostname being overridden for Cert check.

    Returns:
      a gnmi_pb2_grpc object representing a gNMI Stub.
    """
    endpoint = target + ':' + port
    if not creds:
        channel = gnmi_pb2_grpc.grpc.insecure_channel(endpoint)
    elif host_override:
        # Override the name the TLS layer expects on the Target certificate.
        channel = gnmi_pb2_grpc.grpc.secure_channel(
            endpoint, creds, (('grpc.ssl_target_name_override',
                               host_override,),))
    else:
        channel = gnmi_pb2_grpc.grpc.secure_channel(endpoint, creds)
    return gnmi_pb2_grpc.gNMIStub(channel)
def _format_type(json_value):
"""Helper to determine the Python type of the provided value from CLI.
Args:
json_value: (str) Value providing from CLI.
Returns:
json_value: The provided input coerced into proper Python Type.
"""
if (json_value.startswith('-') and json_value[1:].isdigit()) or (
json_value.isdigit()):
return int(json_value)
if (json_value.startswith('-') and json_value[1].isdigit()) or (
json_value[0].isdigit()):
return float(json_value)
if json_value.capitalize() == 'True':
return True
if json_value.capitalize() == 'False':
return False
return json_value # The value is a string.
def _get_val(json_value):
    """Get the gNMI val for path definition.

    Args:
      json_value: (str) JSON_IETF value, or a filename marked with '@'.

    Returns:
      gnmi_pb2.TypedValue()

    Raises:
      JsonReadError: the referenced JSON file could not be read or parsed.
    """
    val = gnmi_pb2.TypedValue()
    # NOTE(review): an '@' anywhere in the value (not only a leading one)
    # selects file mode -- preserved for compatibility; confirm with callers.
    if '@' in json_value:
        try:
            # BUG FIX: use a context manager so the file handle is closed on
            # every path (the original leaked the open handle), and drop the
            # needless six.moves.builtins indirection around builtin open().
            with open(json_value.strip('@'), 'rb') as json_file:
                set_json = json.load(json_file)
        except (IOError, ValueError) as e:
            raise JsonReadError('Error while loading JSON: %s' % str(e))
        val.json_ietf_val = json.dumps(set_json).encode()
        return val
    coerced_val = _format_type(json_value)
    # Map the coerced Python type onto the matching TypedValue field name.
    type_to_value = {bool: 'bool_val', int: 'int_val', float: 'float_val',
                     str: 'string_val'}
    if type_to_value.get(type(coerced_val)):
        setattr(val, type_to_value.get(type(coerced_val)), coerced_val)
    return val
def _get(stub, paths, username, password):
    """Create a gNMI GetRequest.

    Args:
      stub: (class) gNMI Stub used to build the secure channel.
      paths: gNMI Path
      username: (str) Username used when building the channel.
      password: (str) Password used when building the channel.

    Returns:
      a gnmi_pb2.GetResponse object representing a gNMI GetResponse.
    """
    request = gnmi_pb2.GetRequest(path=[paths], encoding='JSON_IETF')
    if not username:
        return stub.Get(request)
    # User/pass supplied for Authentication; attach it as gRPC metadata.
    return stub.Get(request, metadata=[('username', username),
                                       ('password', password)])
def _set(stub, paths, set_type, username, password, json_value):
    """Create a gNMI SetRequest.

    Args:
      stub: (class) gNMI Stub used to build the secure channel.
      paths: gNMI Path
      set_type: (str) Type of gNMI SetRequest ('delete', 'update', 'replace').
      username: (str) Username used when building the channel.
      password: (str) Password used when building the channel.
      json_value: (str) JSON_IETF or file.

    Returns:
      a gnmi_pb2.SetResponse object representing a gNMI SetResponse.
    """
    # Specifying ONLY a path is possible (eg delete); update/replace are
    # expected to always arrive with a json_value.
    if json_value:
        path_val = gnmi_pb2.Update(path=paths, val=_get_val(json_value),)
    kwargs = {}
    if username:
        kwargs['metadata'] = [('username', username), ('password', password)]
    if set_type == 'delete':
        return stub.Set(gnmi_pb2.SetRequest(delete=[paths]), **kwargs)
    if set_type == 'update':
        return stub.Set(gnmi_pb2.SetRequest(update=[path_val]), **kwargs)
    return stub.Set(gnmi_pb2.SetRequest(replace=[path_val]), **kwargs)
def _build_creds(target, port, get_cert, certs, notls):
    """Define credentials used in gNMI Requests.

    Args:
      target: (str) gNMI Target.
      port: (str) gNMI Target IP port.
      get_cert: (str) Certificate should be obtained from Target for gRPC
        channel.
      certs: (dict) Certificates to use in building the gRPC channel.
      notls: (bool) True to skip TLS entirely and return no credentials.

    Returns:
      a gRPC.ssl_channel_credentials object, or None when notls is set.
    """
    if notls:
        return None
    root = certs['root_cert']
    if get_cert:
        # Trust whatever certificate the Target itself presents.
        logging.info('Obtaining certificate from Target')
        root = ssl.get_server_certificate((target, port)).encode('utf-8')
    return gnmi_pb2_grpc.grpc.ssl_channel_credentials(
        root_certificates=root, private_key=certs['private_key'],
        certificate_chain=certs['cert_chain'])
def _open_certs(**kwargs):
"""Opens provided certificate files.
Args:
root_cert: (str) Root certificate file to use in the gRPC channel.
cert_chain: (str) Certificate chain file to use in the gRPC channel.
private_key: (str) Private key file to use in the gRPC channel.
Returns:
root_cert: (str) Root certificate to use in the gRPC channel.
cert_chain: (str) Certificate chain to use in the gRPC channel.
private_key: (str) Private key to use in the gRPC channel.
"""
for key, value in kwargs.items():
if value:
kwargs[key] = six.moves.builtins.open(value, 'rb').read()
return kwargs
def main():
argparser = _create_parser()
args = vars(argparser.parse_args())
if args['version']:
print(__version__)
sys.exit()
if args['debug']:
os.environ['GRPC_TRACE'] = 'all'
os.environ['GRPC_VERBOSITY'] = 'DEBUG'
mode = args['mode']
target = args['target']
port = args['port']
notls = args['notls']
get_cert = args['get_cert']
root_cert = args['root_cert']
cert_chain = args['cert_chain']
json_value = args['value']
private_key = args['private_key']
xpath = args['xpath']
| |
nx.array(fPQ)
Psmooth = fPQ[:,1:4]
Psmooth = nx.array(Psmooth)*to_meters
print 'loaded cached Psmooth from file',results.filename
if Qsmooth is None and not do_smooth_quats:
Qsmooth = QuatSeq( [ cgtypes.quat( q_wxyz ) for q_wxyz in fPQ[:,4:] ])
print 'loaded cached Qsmooth from file',results.filename
print 'Psmooth.shape',Psmooth.shape
except Exception, exc:
print 'WARNING:',str(exc)
print 'Not using cached smoothed data'
else:
ftype='cheby1'
wp_hz = 14.0; gp = 0.001
ws_hz = 28.0; gs = 20.0
hz = 1.0/delta_t
wp = wp_hz/hz
ws = ws_hz/hz
filt_b, filt_a = scipy.signal.iirdesign(wp,ws,gp,gs,ftype=ftype)
Psmooth_cols = []
import scipy_utils
for col in range(P.shape[1]):
Psmooth_cols.append(
scipy_utils.filtfilt(filt_b,filt_a,P[:,col]) )
Psmooth = nx.array(Psmooth_cols)
Psmooth.transpose()
if Psmooth is None and do_smooth_position:
of = ObjectiveFunctionPosition(P, delta_t, alpha,
no_distance_penalty_idxs=interped_p_idxs)
#epsilon1 = 200e6
epsilon1 = 0
#epsilon1 = 150e6
#epsilon1 = 1.0
percent_error_eps = 9
Psmooth = P.copy()
last_err = None
max_iter1 = 10000
count = 0
while count<max_iter1:
count+=1
start = time.time()
del_F = of.get_del_F(Psmooth)
stop = time.time()
print 'P elapsed: % 4.2f secs,'%(stop-start,),
err = nx.sum(nx.sum(del_F**2,axis=1))
print 'sum( norm(del F)):',err
if err < epsilon1:
break
elif last_err is not None:
if err > last_err:
print 'ERROR: error is increasing, aborting'
break
pct_err = (last_err-err)/last_err*100.0
print ' (%3.1f%%)'%(pct_err,)
if pct_err < percent_error_eps:
print 'reached percent_error_eps'
break
last_err = err
Psmooth = Psmooth - lambda1*del_F
if do_smooth_position or return_smooth_position:
outputs.append(Psmooth/to_meters)
if Psmooth is not None:
dPdt_smooth = (Psmooth[2:]-Psmooth[:-2]) / (2*delta_t)
d2Pdt2_smooth = (Psmooth[2:] - 2*Psmooth[1:-1] + Psmooth[:-2]) / (delta_t**2)
if Qsmooth is None and do_smooth_quats:
print 'smoothing quats...'
#gamma = 1000
#gamma = 0.0
of = ObjectiveFunctionQuats(Q, delta_t, beta, gamma,
no_distance_penalty_idxs=slerped_q_idxs)
#epsilon2 = 200e6
epsilon2 = 0
#lambda2 = 2e-9
#lambda2 = 1e-9
#lambda2 = 1e-11
Q_k = Q[:] # make copy
last_err = None
max_iter2 = 2000
count = 0
while count<max_iter2:
count += 1
start = time.time()
del_G = of.get_del_G(Q_k)
D = of._getDistance(Q_k)
E = of._getEnergy(Q_k)
R = of._getRoll(Q_k)
print ' G = %s + %s*%s + %s*%s'%(str(D),str(beta),str(E),str(gamma),str(R))
stop = time.time()
err = math.sqrt(nx.sum(nx.array(abs(del_G))**2))
if err < epsilon2:
print 'reached epsilon2'
break
elif last_err is not None:
pct_err = (last_err-err)/last_err*100.0
print 'Q elapsed: % 6.2f secs,'%(stop-start,),
print 'current gradient:',err,
print ' (%4.2f%%)'%(pct_err,)
if err > last_err:
print 'ERROR: error is increasing, aborting'
break
if pct_err < percent_error_eps_quats:
print 'reached percent_error_eps_quats'
break
else:
print 'Q elapsed: % 6.2f secs,'%(stop-start,),
print 'current gradient:',err
last_err = err
Q_k = Q_k*(del_G*-lambda2).exp()
if count>=max_iter2:
print 'reached max_iter2'
Qsmooth = Q_k
if do_smooth_quats or return_smooth_quats:
outputs.append(Qsmooth)
if Qsmooth is not None:
omega_smooth = (Qsmooth[:-1].inverse()*Qsmooth[1:]).log()/delta_t
omega_dot_smooth = ((Qsmooth[1:-1].inverse()*Qsmooth[2:]).log() -
(Qsmooth[:-2].inverse()*Qsmooth[1:-1]).log()) / (delta_t**2)
do_smooth_quats = True # we've got 'em now, one way or another
# body-centric groundspeed (using quaternion rotation)
body_ground_V = rotate_velocity_by_orientation( dPdt, Q[1:-1])
if Qsmooth is not None:
body_ground_V_smooth = rotate_velocity_by_orientation( dPdt_smooth, Qsmooth[1:-1])
airspeed = nx.array((0.0,0,0))
#airspeed = nx.array((-.4,0,0))
dPdt_air = dPdt - airspeed # world centric airspeed
# No need to calculate acceleration relative to air because air
# velocity is always constant thus d2Pdt2_air == d2Pdt2.
if Psmooth is not None:
dPdt_smooth_air = dPdt_smooth - airspeed # world centric airspeed
# body-centric airspeed (using quaternion rotation)
body_air_V = rotate_velocity_by_orientation(dPdt_air,Q[1:-1])
if 0:
# check that coordinate xform doesn't affect velocity magnitude
tmp_V2_a = body_air_V.x**2 + body_air_V.y**2 + body_air_V.z**2
tmp_V2_b = dPdt_air[:,0]**2 + dPdt_air[:,1]**2 + dPdt_air[:,2]**2
for i in range(len(tmp_V2_a)):
print abs(tmp_V2_a[i]-tmp_V2_b[i]),' near 0?'
if Qsmooth is not None:
body_air_V_smooth = rotate_velocity_by_orientation(dPdt_smooth_air,Qsmooth[1:-1])
# compute body-centric angular velocity
omega_body = rotate_velocity_by_orientation( omega, Q[:-1])
if Qsmooth is not None:
omega_smooth_body = rotate_velocity_by_orientation( omega_smooth, Qsmooth[:-1])
t_omega_body = t_P[:-1]
if Qsmooth is not None: # compute forces (for now, smooth data only)
# vector for current orientation (use only indices with velocity info)
orient_parallel = quat_to_orient(Qsmooth)[1:-1]
# vector for current velocity
Vair_orient = dPdt_air/nx.sqrt(nx.sum(dPdt_air**2,axis=1)[:,nx.newaxis])
# compute alpha == angle of attack
aattack = nx.arccos( [nx.dot(v,p) for v,p in zip(Vair_orient,orient_parallel)])
#print aattack*rad2deg
if 0:
Vmag_air2 = body_air_V.x**2 + body_air_V.y**2 + body_air_V.z**2
else:
Vmag_air2 = (body_air_V_smooth.x**2 + body_air_V_smooth.y**2 +
body_air_V_smooth.z**2)
make_norm = reconstruct.norm_vec # normalize vector
if 0:
# calculate body drag based on wooden semi-cylinder model
# of <NAME> (unpublished)
# find vector for normal force
tmp_out_of_plane = [cross(v,p) for v,p in zip(Vair_orient,orient_parallel)]
orient_normal = [cross(p,t) for t,p in zip(tmp_out_of_plane,orient_parallel)]
orient_normal = nx.array([make_norm( o_n ) for o_n in orient_normal])
cyl_diam = 0.5 #mm
cyl_diam = cyl_diam / 1e3 # meters
cyl_height = 1.75 #mm
cyl_height = cyl_height / 1e3 # meters
A = cyl_diam*cyl_height
rho = 1.25 # kg/m^3
C_P=0.16664033221423064*nx.cos(aattack)+0.33552465566450407*nx.cos(aattack)**3
C_N=0.75332031249999987*nx.sin(aattack)
F_P = 0.5*rho*A*C_P*Vmag_air2
F_N = 0.5*rho*A*C_N*Vmag_air2
# convert normal and parallel forces back to world coords
body_drag_world1 = nx.array([ orient_parallel[i] * -F_P[i] for i in range(len(F_P))])
body_drag_world2 = nx.array([ orient_normal[i] * -F_N[i] for i in range(len(F_N))])
body_drag_world = body_drag_world1 + body_drag_world2
else:
body_drag_world = None
t_forces = t_dPdt
# force required to stay aloft
fly_mass = 1e-6 # guesstimate (1 milligram)
G = 9.81 # gravity: meters / second / second
aloft_force = fly_mass*G
# resultant force
# my'' = F + mg - Cy'^2
# F = m(y''-g) - Cy'^2
# F = m(y''-g) - body_drag_world
Garr = nx.array([ [0,0,-9.81] ]*len(t_forces))
resultant = fly_mass*(d2Pdt2_smooth - Garr)
if body_drag_world is not None:
resultant = resultant - body_drag_world
if return_resultant_forces:
outputs.append( (frame[1:-1], resultant) ) # return frame numbers also
# We can attempt to decompose the resultant force r into
# "thrust" t (up and forward relative to body) and "drag" d as
# follows. t + d = r --> r + -d = t
# Calculate "drag" at given velocity.
Vmag_air = nx.sqrt(Vmag_air2)
Vmag_air.shape = Vmag_air.shape[0], 1
Vdir_air = dPdt_air/Vmag_air
if drag_model_for_roll == 'linear':
# Assume drag is linearly proportional to velocity as
# asserted # by <NAME>, 1978, but with the
# appropriate # coefficients, this makes little difference
# on the body angle # vs. terminal velocity relationship.
# Cf_linear was calculated to produce angle of attack
# vs. terminal velocity relation roughly equal to curve of
# # David 1978.
Cf_linear = -0.000012
drag_force = Cf_linear * Vmag_air * Vdir_air
elif drag_model_for_roll == 'v^2':
Cf_V2 = -0.000015
V2 = Vmag_air2[:,nx.newaxis]
drag_force = Cf_V2 * V2 * Vdir_air
if return_drag_force:
outputs.append( (frame[1:-1], drag_force) ) # return frame numbers also
print 'used drag model:',drag_model_for_roll,'to compute roll angle'
thrust_force = resultant - drag_force
if return_thrust_force:
outputs.append( (frame[1:-1], thrust_force) ) # return frame numbers also
# 2 planes : saggital and coronal
# Project thrust_force onto coronal plane.
# Do this by eliminating component of thrust_force in body
# axis direction.
# subtract component of force in direction of fly's body
coronal_thrust_force = nx.array([ tf - nx.dot(tf,op)*op for tf, op in zip(thrust_force, orient_parallel) ])
# get direction of this force
coronal_thrust_dir = nx.array([make_norm(ctf) for ctf in coronal_thrust_force])
fly_up = cgtypes.quat(0,0,0,1) # fly up vector in fly coords
# make sure there is no roll component to offset our results:
Qsmooth_zero_roll = QuatSeq([ euler_to_quat( yaw=quat_to_euler(q)[0], pitch=quat_to_euler(q)[1], roll=0) for q in Qsmooth ])
fly_up_world = [ q*fly_up*q.inverse() for q in Qsmooth_zero_roll[1:-1] ]
fly_up_world = nx.array([(v.x, v.y, v.z) for v in fly_up_world])
if 1:
cos_roll = nx.array([ nx.dot( ctd, fuw ) for ctd, fuw in zip(coronal_thrust_dir, fly_up_world) ])
guess_roll = nx.arccos(cos_roll)
if 0:
# mcp = | u x v | = |u||v|sin t
# dp = u . v = |u||v|cos t
# atan2(mcp,dp) = t
cp = [ cross(u,v) for u,v in zip(fly_up_world, coronal_thrust_dir)]
mcp = nx.array([ math.sqrt(u[0]**2 + u[1]**2 + u[2]**2) for u in cp ])
dp = nx.array([ nx.dot(u,v) for u,v in zip(fly_up_world, coronal_thrust_dir)])
guess_roll2 = [math.atan2( num, denom ) for num,denom in zip(mcp,dp)]
if 0:
for r1, r2 in zip(guess_roll, guess_roll2):
print r1,'?=',r2
if 1:
# XXX hack to fix some sign error somewhere (ARGH!!)
# Note: may not be sign error -- quats represent same rotation
# with 2 quats...
Qsmooth_roll_guess = Qsmooth[:]
for i in range(1,len(Qsmooth_roll_guess)-1):
q = Qsmooth[i]
yaw, pitch, old_roll = quat_to_euler(q)
new_roll = guess_roll[i-1]
#roll = old_roll - new_roll
roll = new_roll
qnew = | |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.spawn import find_executable
import logging
import os
import platform
import stat
import subprocess
import common
import util
"""Contains all necessary methods for setting up Android on Linux and Mac OSX.
All update and install methods will check if a download is necessary first.
All download methods will not.
"""
# The default names of the folders the Android SDK and Android NDK will be
# downloaded/installed to.
SDK_NAMES = {
    common.LINUX: "android-sdk-linux",
    common.MAC: "android-sdk-macosx",
    common.WINDOWS: "android-sdk-windows"
}

# (url, sha1) of the Android SDK archive for each supported OS.
SDK_VERSIONS = {
    common.LINUX: ("http://dl.google.com/android/android-sdk_r24.4.1-linux.tgz",
                   "725bb360f0f7d04eaccff5a2d57abdd49061326d"),
    common.MAC: ("http://dl.google.com/android/android-sdk_r24.4.1-macosx.zip",
                 "85a9cccb0b1f9e6f1f616335c5f07107553840cd"),
    common.WINDOWS: ("http://dl.google.com/android/android-sdk_r24.4.1-windows"
                     ".zip", "66b6a6433053c152b22bf8cab19c0f3fef4eba49")
}

ANDROID_NDK = "android-ndk-r10e"
NDK_DOWNLOAD_PREFIX = "http://dl.google.com/android/ndk/"

# (filename, sha1) of the Android NDK installer for each supported platform;
# filenames are resolved relative to NDK_DOWNLOAD_PREFIX.
# BUG FIX: the WINDOWS_32/WINDOWS_64 NDK installer entries were previously
# stored in SDK_VERSIONS, where nothing ever reads those keys, while
# android_download_ndk looks them up here and would have received None.
NDK_VERSIONS = {
    common.LINUX_32: ("android-ndk-r10e-linux-x86.bin",
                      "b970d086d5c91c320c006ea14e58bd1a50e1fe52"),
    common.LINUX_64: ("android-ndk-r10e-linux-x86_64.bin",
                      "c685e5f106f8daa9b5449d0a4f21ee8c0afcb2f6"),
    common.MAC: ("android-ndk-r10e-darwin-x86_64.bin",
                 "b57c2b9213251180dcab794352bfc9a241bf2557"),
    common.WINDOWS_32: ("android-ndk-r10e-windows-x86.exe",
                        "eb6bd8fe26f5e6ddb145fef2602dce518bf4e7b6"),
    common.WINDOWS_64: ("android-ndk-r10e-windows-x86_64.exe",
                        "6735993dbf94f201e789550718b64212190d617a")
}
# Packages required for Android SDK updates.
# Does not include platform-tools, as this must be
# checked for and installed first.
# Maps the human-readable package title (as listed by 'android list sdk')
# to the install code passed to 'android update sdk -t'.
# Package title: install_code
ANDROID_SDK_UPDATES = {
    "Android SDK Tools": "tools",
    "Android SDK Build-tools": "build-tools-23.0.2",
    "SDK Platform Android 5.0": "android-21",
    "Android TV ARM EABI v7a System Image, Android API 21":
        "sys-img-armeabi-v7a-android-tv-21",
    # Support Packages
    "Android Support Repository, revision 25": "extra-android-m2repository",
    "Android Support Library, revision 23.1.1": "extra-android-support",
    # Google APIs
    "Google Play services, revision 29": "extra-google-google_play_services",
    "Google Repository, revision 23": "extra-google-m2repository"
}
class AndroidSetup(object):
"""Contains all necessary methods for setting up the Android.
Attributes:
bash_changed: A boolean indicating whether or not the bashrc or bash
profile has been edited by the script, indicating the user should call
source ~/.bashrc or bash_profile
bash: A string of the path of the user's bashrc/profile.
sdk_path: A string of the location of the Android SDK package.
ndk_path: A string of the location of the Android NDK package.
Raises:
SystemUnsupportedError: If the system not recognised as Linux or Mac OS X.
BadDirectoryError: If the specified SDK or NDK directory does not exist.
"""
def __init__(self, system, options):
    """Record OS-specific paths and validate the SDK/NDK directories.

    Args:
      system: one of common.LINUX, common.MAC or common.WINDOWS.
      options: parsed options supplying sdk_location and ndk_location.

    Raises:
      SystemUnsupportedError: the system is not Linux, Mac OS X or Windows.
      BadDirectoryError: the SDK or NDK directory does not exist.
    """
    self.system = system
    # Per-OS shell profile name; Windows has no bash profile.
    profile_names = {common.LINUX: ".bashrc", common.MAC: ".bash_profile",
                     common.WINDOWS: ""}
    if system not in profile_names:
        raise common.SystemUnsupportedError(system)
    profile = profile_names[system]
    self.bash = os.path.join(common.BASE_DIR, profile) if profile else ""
    if system == common.WINDOWS:
        # PATH additions are accumulated here instead of a bash profile.
        self.windows_path_update = ""
    self.bash_changed = False
    self.sdk_path = os.path.join(common.BASE_DIR, options.sdk_location)
    if not os.path.isdir(self.sdk_path):
        raise common.BadDirectoryError("--android_sdk", self.sdk_path)
    self.ndk_path = os.path.join(common.BASE_DIR, options.ndk_location)
    if not os.path.isdir(self.ndk_path):
        raise common.BadDirectoryError("--android_ndk", self.ndk_path)
def android_install_sdk(self):
    """Locate the Android SDK, downloading it if it cannot be found."""
    logging.info("Checking for Android SDK...")
    # If 'android' is already on PATH, derive the SDK root from it.
    found = find_executable("android")
    if found:
        # Strip tools/android off the executable path to get the SDK root.
        self.sdk_path = os.path.dirname(os.path.dirname(found))
        logging.info("Android SDK found at " + self.sdk_path)
        return
    # Not on PATH, but the SDK may still exist in the expected directory.
    suffix = ".bat" if self.system == common.WINDOWS else ""
    android_path = os.path.join("tools", "android") + suffix
    found = util.check_dir(self.sdk_path, SDK_NAMES.get(self.system),
                           android_path)
    if found:
        self.sdk_path = found
        logging.info("Android SDK found at " + self.sdk_path)
        return
    logging.info("Android SDK not found. Downloading now.")
    self.android_download_sdk(self.sdk_path)
def android_download_sdk(self, directory):
    """Download the Android SDK and unpack it into the given directory.

    Args:
      directory: String indication of location to unpack SDK to

    Raises:
      FileDownloadError: SDK tar or zip fails to download
      UnknownFileTypeError: If the file downloaded is neither a tar or a zip,
          and cannot be extracted.
    """
    url, file_hash = SDK_VERSIONS.get(self.system)
    suffix = util.get_file_type(url)
    archive = util.download_file(url, os.path.join(directory, "sdk." + suffix),
                                 "Android SDK", file_hash)
    if not archive:
        raise common.FileDownloadError("http://developer.android.com/sdk/index."
                                       "html#", "Please rerun this script "
                                       "afterwards with the flag\n"
                                       "\t--android_sdk=/path/to/android_sdk")
    # Pick the extractor matching the archive type.
    if suffix == "tgz":
        util.extract_tarfile(archive, "r", directory, "Android SDK")
    elif suffix == "zip":
        util.extract_zipfile(archive, "r", directory, "Android SDK")
    else:
        raise common.UnknownFileTypeError(suffix, "Please manually extract "
                                          "Android SDK and rerun this script "
                                          "afterwards with the flag\n"
                                          "\t--android_sdk=/path/to/android_sdk")
    if self.system == common.MAC:
        # Sometimes, permissions aren't set correctly on tools/android on OSX.
        # Change permissions to allow execution by user.
        android = os.path.join(directory, SDK_NAMES.get(self.system), "tools",
                               "android")
        os.chmod(android, os.stat(android).st_mode | stat.S_IXUSR)
    # Update self.sdk_path to now include the SDK name.
    self.sdk_path = os.path.join(self.sdk_path, SDK_NAMES.get(self.system))
def android_update_sdk_path(self):
    """Ensure SDK tools and platform-tools directories are on the PATH."""
    need_tools = find_executable("android") is None
    # NOTE(review): platform-tools presence is probed via ndk-build here,
    # which looks like it should be an SDK binary (e.g. adb) -- confirm.
    need_platform = find_executable("ndk-build") is None
    if not (need_tools or need_platform):
        return
    tools_dir = os.path.join(self.sdk_path, "tools")
    platform_dir = os.path.join(self.sdk_path, "platform-tools")
    if self.bash:  # LINUX or MAC: append export lines to bashrc/profile.
        with open(self.bash, "a") as f:
            if need_tools:
                f.write("export PATH=" + tools_dir + ":$PATH\n")
            if need_platform:
                f.write("export PATH=" + platform_dir + ":$PATH\n")
    else:  # WINDOWS: accumulate directories for a later PATH update.
        if need_tools:
            self.windows_path_update = (tools_dir + os.pathsep +
                                        self.windows_path_update)
        if need_platform:
            self.windows_path_update = (platform_dir + os.pathsep +
                                        self.windows_path_update)
    self.bash_changed = True
def android_update_platform_tools(self):
    """Update the Android SDK Platform Tools."""
    # This is very verbose, and requires a y/n response.
    # Android SDK Platform-tools must be installed before Tools.
    # NOTE(review): shell=True with a path-derived string; sdk_path is local
    # configuration, but an argument list with shell=False would be safer.
    cmd = (self.sdk_path + "/tools/android update sdk -u -a -t " +
           "platform-tools")
    subprocess.call(cmd, shell=True)
def android_get_relevant_sdk_updates(self, all_available_updates):
    """Check to see if any of the updates listed as available are relevant.

    Args:
      all_available_updates: A string of all the updates currently listed as
          available to download

    Returns:
      A list of all the package install codes which can be downloaded
    """
    # NOTE(review): the original docstring declared CommandFailedError in a
    # Raises clause, but this method only scans a string and cannot raise it;
    # that clause belongs to android_get_all_sdk_updates.
    # Idiom: replaced the manual accumulate loop with a comprehension.
    return [code for title, code in ANDROID_SDK_UPDATES.items()
            if title in all_available_updates]
def android_get_all_sdk_updates(self):
    """Get a list of all possible android SDK updates as a string.

    Raises:
      CommandFailedError: tools/android could not be run for any reason.
    """
    logging.info("Checking for updates...")
    list_cmd = self.sdk_path + "/tools/android list sdk"
    try:
        return subprocess.check_output(list_cmd, shell=True)
    except subprocess.CalledProcessError:
        raise common.CommandFailedError(list_cmd,
                                        "http://developer.android.com/tools/"
                                        "help/android.html")
def android_update_sdk(self):
    """Checks for and performs any necessary Android SDK updates found."""
    performed_update = False
    available = self.android_get_all_sdk_updates()
    if "Android SDK Platform-tools" in available:
        # Platform-tools goes first: tools and build-tools only appear in
        # the listing once platform-tools is installed, so refresh after.
        self.android_update_platform_tools()
        available = self.android_get_all_sdk_updates()
        performed_update = True
    packages = self.android_get_relevant_sdk_updates(available)
    if packages:
        subprocess.call(self.sdk_path + "/tools/android update sdk -u -a -t " +
                        ",".join(packages), shell=True)
        performed_update = True
    if not performed_update:
        logging.info("\tNo Android SDK updates required.")
def android_install_ndk(self):
"""Checks the directory for installing Android NDK."""
logging.info("Checking for Android NDK...")
# Check if android path is already set up
location = find_executable("ndk-build")
if location:
# Strip ndk-build out of path name
self.sdk_path = os.path.dirname(location)
logging.info("Android NDK found at " + self.ndk_path)
return
# Path is not set, but ndk may still exist
location = util.check_dir(self.ndk_path, ANDROID_NDK, "ndk-build")
if location:
self.ndk_path = location
logging.info("Android NDK found at " + self.ndk_path)
return
logging.info("Android NDK not found. Downloading now.")
self.android_download_ndk(self.ndk_path)
def android_download_ndk(self, directory):
"""Checks OS version and downloads the appropriate Android NDK.
Args:
directory: String indication of location to unpack NDK
Raises:
FileDownloadError: NDK bin or exe fails to download
InstallInterruptError: if the wait for the NDK
"""
if self.system == common.LINUX:
os_version = subprocess.check_output("uname -m", shell=True)
if os_version.strip() == "x86_64":
url, file_hash = NDK_VERSIONS.get(common.LINUX_64)
else:
url, file_hash = NDK_VERSIONS.get(common.LINUX_32)
elif self.system == common.WINDOWS:
os_version = platform.architecture()[0]
if os_version == "64bit":
url, file_hash = NDK_VERSIONS.get(common.WINDOWS_64)
else:
url, file_hash = NDK_VERSIONS.get(common.WINDOWS_32)
else: # self.system = common.MAC
url, file_hash = NDK_VERSIONS.get(self.system)
filetype = util.get_file_type(url)
url = NDK_DOWNLOAD_PREFIX + url
ndk_location = os.path.join(directory, "ndk." + filetype)
ndk_location = util.download_file(url, ndk_location, "Android NDK",
file_hash)
if not ndk_location:
raise common.FileDownloadError("http://developer.android.com/ndk/"
"downloads/index.html", "Please rerun "
"this script afterwards with the flag\n"
"\t--android_ndk=/path/to/android_ndk")
if filetype == "bin":
# Allow execution by all parties.
os.chmod(ndk_location, 0755)
current_dir = os.getcwd()
os.chdir(common.BASE_DIR)
os.system(ndk_location)
os.chdir(current_dir)
os.remove(ndk_location)
elif filetype == "exe":
os.chdir(self.ndk_path)
subprocess.call("start cmd /c " + ndk_location, shell=True)
# toolchain-licenses\COPYING is one of the last things to be extracted.
if not util.wait_for_installation("COPYING", search=True,
basedir=self.ndk_path):
raise common.InstallInterruptError("Android NDK")
os.chdir(current_dir)
else:
raise common.UnknownFileTypeError(filetype, "Please manually extract "
"Android NDK and rerun this script "
"afterwards with the flag\n\t"
"--android_ndk=/path/to/android_ndk")
def android_update_ndk_path(self):
"""Checks bashrc/profile and edits it to include Android NDK path."""
if not find_executable("ndk-build"):
if self.bash:
with open(self.bash, "a") | |
House', 'Lumberjack House'),
('Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance'),
('Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance'),
('Hyrule Castle Secret Entrance Exit', 'Light World'),
('Bonk Fairy (Light)', 'Bonk Fairy (Light)'),
('Lake Hylia Fairy', 'Lake Hylia Healer Fairy'),
('Lake Hylia Fortune Teller', 'Lake Hylia Fortune Teller'),
('Light Hype Fairy', 'Swamp Healer Fairy'),
('Desert Fairy', 'Desert Healer Fairy'),
('Kings Grave', 'Kings Grave'),
('Tavern North', 'Tavern'),
('Chicken House', 'Chicken House'),
('Aginahs Cave', 'Aginahs Cave'),
('Sahasrahlas Hut', 'Sahasrahlas Hut'),
('Cave Shop (Lake Hylia)', 'Cave Shop (Lake Hylia)'),
('Capacity Upgrade', 'Capacity Upgrade'),
('Kakariko Well Drop', 'Kakariko Well (top)'),
('Kakariko Well Cave', 'Kakariko Well (bottom)'),
('Kakariko Well Exit', 'Light World'),
('Blacksmiths Hut', 'Blacksmiths Hut'),
('Bat Cave Drop', 'Bat Cave (right)'),
('Bat Cave Cave', 'Bat Cave (left)'),
('Bat Cave Exit', 'Light World'),
('Sick Kids House', 'Sick Kids House'),
('Elder House (East)', 'Elder House'),
('Elder House (West)', 'Elder House'),
('Elder House Exit (East)', 'Light World'),
('Elder House Exit (West)', 'Light World'),
('North Fairy Cave Drop', 'North Fairy Cave'),
('North Fairy Cave', 'North Fairy Cave'),
('North Fairy Cave Exit', 'Light World'),
('Lost Woods Gamble', 'Lost Woods Gamble'),
('Fortune Teller (Light)', 'Fortune Teller (Light)'),
('Snitch Lady (East)', 'Snitch Lady (East)'),
('Snitch Lady (West)', 'Snitch Lady (West)'),
('Bush Covered House', 'Bush Covered House'),
('Tavern (Front)', 'Tavern (Front)'),
('Light World Bomb Hut', 'Light World Bomb Hut'),
('Kakariko Shop', 'Kakariko Shop'),
('Lost Woods Hideout Drop', 'Lost Woods Hideout (top)'),
('Lost Woods Hideout Stump', 'Lost Woods Hideout (bottom)'),
('Lost Woods Hideout Exit', 'Light World'),
('Lumberjack Tree Tree', 'Lumberjack Tree (top)'),
('Lumberjack Tree Cave', 'Lumberjack Tree (bottom)'),
('Lumberjack Tree Exit', 'Light World'),
('Cave 45', 'Cave 45'),
('Graveyard Cave', 'Graveyard Cave'),
('Checkerboard Cave', 'Checkerboard Cave'),
('Mini Moldorm Cave', 'Mini Moldorm Cave'),
('Long Fairy Cave', 'Long Fairy Cave'),
('Good Bee Cave', 'Good Bee Cave'),
('20 Rupee Cave', '20 Rupee Cave'),
('50 Rupee Cave', '50 Rupee Cave'),
('Ice Rod Cave', 'Ice Rod Cave'),
('Bonk Rock Cave', 'Bonk Rock Cave'),
('Library', 'Library'),
('Kakariko Gamble Game', 'Kakariko Gamble Game'),
('Potion Shop', 'Potion Shop'),
('Two Brothers House (East)', 'Two Brothers House'),
('Two Brothers House (West)', 'Two Brothers House'),
('Two Brothers House Exit (East)', 'Light World'),
('Two Brothers House Exit (West)', 'Maze Race Ledge'),
('Sanctuary', 'Sanctuary'),
('Sanctuary Grave', 'Sewer Drop'),
('Sanctuary Exit', 'Light World'),
('Old Man House (Bottom)', 'Old Man House'),
('Old Man House Exit (Bottom)', 'Death Mountain'),
('Old Man House (Top)', 'Old Man House Back'),
('Old Man House Exit (Top)', 'Death Mountain'),
('Spectacle Rock Cave Peak', 'Spectacle Rock Cave (Peak)'),
('Spectacle Rock Cave (Bottom)', 'Spectacle Rock Cave (Bottom)'),
('Spectacle Rock Cave', 'Spectacle Rock Cave (Top)'),
('Spectacle Rock Cave Exit', 'Death Mountain'),
('Spectacle Rock Cave Exit (Top)', 'Death Mountain'),
('Spectacle Rock Cave Exit (Peak)', 'Death Mountain'),
('Paradox Cave (Bottom)', 'Paradox Cave Front'),
('Paradox Cave (Middle)', 'Paradox Cave'),
('Paradox Cave (Top)', 'Paradox Cave'),
('Paradox Cave Exit (Bottom)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Middle)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Top)', 'East Death Mountain (Top)'),
('Hookshot Fairy', 'Hookshot Fairy'),
('Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Bottom)'),
('Fairy Ascension Cave (Top)', 'Fairy Ascension Cave (Top)'),
('Fairy Ascension Cave Exit (Bottom)', 'Fairy Ascension Plateau'),
('Fairy Ascension Cave Exit (Top)', 'Fairy Ascension Ledge'),
('Spiral Cave', 'Spiral Cave (Top)'),
('Spiral Cave (Bottom)', 'Spiral Cave (Bottom)'),
('Spiral Cave Exit', 'East Death Mountain (Bottom)'),
('Spiral Cave Exit (Top)', 'Spiral Cave Ledge'),
('Pyramid Fairy', 'Pyramid Fairy'),
('East Dark World Hint', 'East Dark World Hint'),
('Palace of Darkness Hint', 'Palace of Darkness Hint'),
('Dark Lake Hylia Shop', 'Dark Lake Hylia Shop'),
('Dark Lake Hylia Fairy', 'Dark Lake Hylia Healer Fairy'),
('Dark Lake Hylia Ledge Fairy', 'Dark Lake Hylia Ledge Healer Fairy'),
('Dark Lake Hylia Ledge Spike Cave', 'Dark Lake Hylia Ledge Spike Cave'),
('Dark Lake Hylia Ledge Hint', 'Dark Lake Hylia Ledge Hint'),
('Hype Cave', 'Hype Cave'),
('Bonk Fairy (Dark)', 'Bonk Fairy (Dark)'),
('Brewery', 'Brewery'),
('C-Shaped House', 'C-Shaped House'),
('Chest Game', 'Chest Game'),
('Dark World Hammer Peg Cave', 'Dark World Hammer Peg Cave'),
('Red Shield Shop', 'Red Shield Shop'),
('Fortune Teller (Dark)', 'Fortune Teller (Dark)'),
('Dark World Shop', 'Village of Outcasts Shop'),
('Dark World Lumberjack Shop', 'Dark World Lumberjack Shop'),
('Dark World Potion Shop', 'Dark World Potion Shop'),
('Archery Game', 'Archery Game'),
('Mire Shed', 'Mire Shed'),
('Dark Desert Hint', 'Dark Desert Hint'),
('Dark Desert Fairy', 'Dark Desert Healer Fairy'),
('Spike Cave', 'Spike Cave'),
('Hookshot Cave', 'Hookshot Cave'),
('Superbunny Cave (Top)', 'Superbunny Cave (Top)'),
('Cave Shop (Dark Death Mountain)', 'Cave Shop (Dark Death Mountain)'),
('Superbunny Cave (Bottom)', 'Superbunny Cave (Bottom)'),
('Superbunny Cave Exit (Bottom)', 'Dark Death Mountain (East Bottom)'),
('Hookshot Cave Exit (North)', 'Death Mountain Floating Island (Dark World)'),
('Hookshot Cave Back Entrance', 'Hookshot Cave'),
('Mimic Cave', 'Mimic Cave'),
('Inverted Pyramid Hole', 'Pyramid'),
('Inverted Links House', 'Inverted Links House'),
('Inverted Links House Exit', 'South Dark World'),
('Inverted Big Bomb Shop', 'Inverted Big Bomb Shop'),
('Inverted Dark Sanctuary', 'Inverted Dark Sanctuary'),
('Inverted Dark Sanctuary Exit', 'West Dark World'),
('Old Man Cave (West)', 'Bumper Cave'),
('Old Man Cave (East)', 'Death Mountain Return Cave'),
('Old Man Cave Exit (West)', 'West Dark World'),
('Old Man Cave Exit (East)', 'Dark Death Mountain'),
('Dark Death Mountain Fairy', 'Old Man Cave'),
('Bumper Cave (Bottom)', 'Old Man Cave'),
('Bumper Cave (Top)', 'Dark Death Mountain Healer Fairy'),
('Bumper Cave Exit (Top)', 'Death Mountain Return Ledge'),
('Bumper Cave Exit (Bottom)', 'Light World'),
('Death Mountain Return Cave (West)', 'Bumper Cave'),
('Death Mountain Return Cave (East)', 'Death Mountain Return Cave'),
('Death Mountain Return Cave Exit (West)', 'Death Mountain'),
('Death Mountain Return Cave Exit (East)', 'Death Mountain'),
('Hookshot Cave Exit (South)', 'Dark Death Mountain'),
('Superbunny Cave Exit (Top)', 'Dark Death Mountain'),
('Pyramid Exit', 'Light World'),
('Inverted Pyramid Entrance', 'Bottom of Pyramid')]
# non shuffled dungeons
default_dungeon_connections = [('Desert Palace Entrance (South)', 'Desert Palace Main (Inner)'),
('Desert Palace Entrance (West)', 'Desert Palace Main (Outer)'),
('Desert Palace Entrance (North)', 'Desert Palace North'),
('Desert Palace Entrance (East)', 'Desert Palace Main (Outer)'),
('Desert Palace Exit (South)', 'Desert Palace Stairs'),
('Desert Palace Exit (West)', 'Desert Ledge'),
('Desert Palace Exit (East)', 'Desert Palace Lone Stairs'),
('Desert Palace Exit (North)', 'Desert Palace Entrance (North) Spot'),
('Eastern Palace', 'Eastern Palace'),
('Eastern Palace Exit', 'Light World'),
('Tower of Hera', 'Tower of Hera (Bottom)'),
('Tower of Hera Exit', 'Death Mountain (Top)'),
('Hyrule Castle Entrance (South)', 'Hyrule Castle'),
('Hyrule Castle Entrance (West)', 'Hyrule Castle'),
('Hyrule Castle Entrance (East)', 'Hyrule Castle'),
('Hyrule Castle Exit (South)', 'Hyrule Castle Courtyard'),
('Hyrule Castle Exit (West)', 'Hyrule Castle Ledge'),
('Hyrule Castle Exit (East)', 'Hyrule Castle Ledge'),
('Agahnims Tower', 'Agahnims Tower'),
('Agahnims Tower Exit', 'Hyrule Castle Ledge'),
('Thieves Town', 'Thieves Town (Entrance)'),
('Thieves Town Exit', 'West Dark World'),
('Skull Woods First Section Hole (East)', 'Skull Woods First Section (Right)'),
('Skull Woods First Section Hole (West)', 'Skull Woods First Section (Left)'),
('Skull Woods First Section Hole (North)', 'Skull Woods First Section (Top)'),
('Skull Woods First Section Door', 'Skull Woods First Section'),
('Skull Woods First Section Exit', 'Skull Woods Forest'),
('Skull Woods Second Section Hole', 'Skull Woods Second Section (Drop)'),
('Skull Woods Second Section Door (East)', 'Skull Woods Second Section'),
('Skull Woods Second Section Door (West)', 'Skull Woods Second Section'),
('Skull Woods Second Section Exit (East)', 'Skull Woods Forest'),
('Skull Woods Second Section Exit (West)', 'Skull Woods Forest (West)'),
('Skull Woods Final Section', 'Skull Woods Final Section (Entrance)'),
('Skull Woods Final Section Exit', 'Skull Woods Forest (West)'),
('Ice Palace', 'Ice Palace (Entrance)'),
('Ice Palace Exit', 'Dark Lake Hylia Central Island'),
('Misery Mire', 'Misery Mire (Entrance)'),
('Misery Mire Exit', 'Dark Desert'),
('Palace of Darkness', 'Palace of Darkness (Entrance)'),
('Palace of Darkness Exit', 'East Dark World'),
('Swamp Palace', 'Swamp Palace (Entrance)'), # requires additional patch for flooding moat if moved
('Swamp Palace Exit', 'South Dark World'),
('Turtle Rock', 'Turtle Rock (Entrance)'),
('Turtle Rock Exit (Front)', 'Dark Death Mountain (Top)'),
('Turtle Rock Ledge Exit (West)', 'Dark Death Mountain Ledge'),
('Turtle Rock Ledge Exit (East)', 'Dark Death Mountain Ledge'),
('Dark Death Mountain Ledge (West)', 'Turtle Rock (Second Section)'),
('Dark Death Mountain Ledge (East)', | |
<gh_stars>0
import functools
import math
import sys
class Point(object):
    """A 2-D point whose coordinates may be undefined (None).

    An instance is truthy only when both coordinates are defined (see
    __bool__); the projection helpers below rely on that to signal that
    no projection was found.

    NOTE(review): __eq__ is defined without __hash__, so instances are
    unhashable under Python 3 (cannot be used as set members / dict keys).
    """
    def __init__(self, x=None, y=None):
        # Coordinates are normalised to float; None marks an undefined
        # coordinate (see __bool__).
        if x is not None:
            self.x = float(x)
        else:
            self.x = None
        if y is not None:
            self.y = float(y)
        else:
            self.y = None
    def __str__(self):
        """Return the point formatted as '(x, y)'."""
        return '(' + str(self.x) + ', ' + str(self.y) + ')'
    def __bool__(self):
        """ True if both x and y are defined """
        if self.x is not None and self.y is not None:
            return True
        else:
            return False
    def __eq__(self, p):
        """Component-wise equality with another point."""
        if self.x == p.x and self.y == p.y:
            return True
        else:
            return False
    def __ne__(self, p):
        """Component-wise inequality with another point."""
        if self.x != p.x or self.y != p.y:
            return True
        else:
            return False
    def __gt__(self, p):
        """Used for sorting in Andrew's monotone chain algorithm for
        determining convex hull of a list of points"""
        # Lexicographic order: compare x first, break ties on y.
        if self.x > p.x:
            return True
        elif self.x < p.x:
            return False
        return self.y > p.y
    def __lt__(self, p):
        """Used for sorting in Andrew's monotone chain algorithm for
        determining convex hull of a list of points"""
        # Lexicographic order: compare x first, break ties on y.
        if self.x < p.x:
            return True
        elif self.x > p.x:
            return False
        return self.y < p.y
    def __ge__(self, p):
        """Used for sorting in Andrew's monotone chain algorithm for
        determining convex hull of a list of points"""
        if self == p or self > p:
            return True
        else:
            return False
    def __le__(self, p):
        """Used for sorting in Andrew's monotone chain algorithm for
        determining convex hull of a list of points"""
        if self == p or self < p:
            return True
        else:
            return False
    def __sub__(self, q):
        """Component-wise subtraction; returns a new Point."""
        return Point(self.x - q.x, self.y - q.y)
    def __add__(self, q):
        """Component-wise addition; returns a new Point."""
        return Point(self.x + q.x, self.y + q.y)
    def dist(self, q):
        """Return the Euclidean distance between self and q."""
        return math.sqrt((self.x - q.x) ** 2 + (self.y - q.y) ** 2)
    def signed_dist_to_line(self, p, q):
        """ Calculate signed distance from self to the line defined by
            p and q. Note that the function does not allow correct
            comparison of signs between lines parallel to either axis
            and lines oriented otherwise.
        """
        if p.y == q.y:
            # Horizontal line: signed vertical offset.
            return self.y - p.y
        elif p.x == q.x:
            # Vertical line: signed horizontal offset.
            return self.x - p.x
        else:
            # General case: express the line as a*x + b*y + c = 0 and
            # normalise by sqrt(a^2 + b^2).
            a = 1 / (q.x - p.x)
            b = -1 / (q.y - p.y)
            c = p.y / (q.y - p.y) - p.x / (q.x - p.x)
            return (a * self.x + b * self.y + c) / math.sqrt(a ** 2 + b ** 2)
    def is_within_polygon(self, pol):
        """ Determine whether point p is inside polygon;
            Uses the crossing number method => works only with simple
            polygons.
        """
        # Empty/None polygon: membership is undefined, not False.
        if not pol:
            return None
        cn = 0
        # Walk polygon edges (index -1 closes the ring) and count edge
        # crossings of the horizontal line through self that lie to the
        # right of self (intersection.x > self.x).
        for n in range(-1, len(pol) - 1):
            if ((pol[n].y <= self.y < pol[n + 1].y)
                    or ((pol[n].y > self.y) and pol[n + 1].y <= self.y)):
                if (line_intersection(pol[n], pol[n + 1], self,
                                      Point(self.x - 1, self.y)).x > self.x):
                    cn += 1
        # Odd crossing number => point is inside.
        if cn % 2 == 1:
            return True
        else:
            return False
    def project_on_path(self, path):
        """ Determine the orthogonal projection of a point on a segmented path;
            Return projection point and first node of the path segment on which
            the point projects. If no projection is possible, return
            Point(None, None), None.
        """
        mindist = float("inf")
        project = Point(None, None)
        seg0 = None
        for n in range(0, len(path) - 1):
            # u: vector from segment start to self; v: the segment vector.
            u = Vec(self.x - path[n].x, self.y - path[n].y)
            v = Vec(path[n + 1].x - path[n].x, path[n + 1].y - path[n].y)
            d = abs(self.signed_dist_to_line(path[n], path[n + 1]))
            # Accept the projection only if it falls between the segment's
            # endpoints, keeping the closest one seen so far.
            if ((u.project(v).dot(v) >= 0) and (u.project(v).dist(Point(0, 0))
                                                <= v.dist(Point(0, 0)))
                    and d < mindist):
                mindist = d
                project = u.project(v) + path[n]
                seg0 = n
        if project:
            # An interior node may still be closer than the best segment
            # projection; prefer it in that case.
            for n in range(1, len(path) - 1):
                d = self.dist(path[n])
                if d < mindist:
                    mindist = d
                    project = path[n]
                    seg0 = n
        return project, seg0
    def project_on_path_or_endnode(self, path):
        """ Determine the orthogonal projection of a point on a segmented path;
            Return projection point and first node of the path segment on which
            the point projects. If no projection is possible, choose nearest
            endpoint as projection.
        """
        mindist = float("inf")
        project = Point(None, None)
        seg0 = None
        for n in range(0, len(path) - 1):
            # u: vector from segment start to self; v: the segment vector.
            u = Vec(self.x - path[n].x, self.y - path[n].y)
            v = Vec(path[n + 1].x - path[n].x, path[n + 1].y - path[n].y)
            d = abs(self.signed_dist_to_line(path[n], path[n + 1]))
            if ((u.project(v).dot(v) >= 0) and (u.project(v).dist(Point(0, 0))
                                                <= v.dist(Point(0, 0)))
                    and d < mindist):
                mindist = d
                project = u.project(v) + path[n]
                seg0 = n
        # Unlike project_on_path, every node (including both endpoints) is
        # considered, so some projection is always returned.
        for n in range(0, len(path)):
            d = self.dist(path[n])
            if d < mindist:
                mindist = d
                project = path[n]
                seg0 = n
        # NOTE(review): n above never reaches len(path), so this guard
        # appears unreachable; kept as-is.
        if seg0 == len(path):
            seg0 -= 1
        return project, seg0
    def project_on_closed_path(self, path):
        """ Determine the orthogonal projection of a point on a closed path;
            Return projection point and first node of the path segment on which
            the point projects.
        """
        mindist = float("inf")
        project = Point(None, None)
        seg0 = None
        # Index -1 closes the ring (last node back to first).
        for n in range(-1, len(path) - 1):
            u = Vec(self.x - path[n].x, self.y - path[n].y)
            v = Vec(path[n + 1].x - path[n].x, path[n + 1].y - path[n].y)
            d = abs(self.signed_dist_to_line(path[n], path[n + 1]))
            if ((u.project(v).dot(v) >= 0) and (u.project(v).dist(Point(0, 0))
                                                <= v.dist(Point(0, 0)))
                    and d < mindist):
                mindist = d
                project = u.project(v) + path[n]
                seg0 = n
        if project:
            for n in range(0, len(path)):
                d = self.dist(path[n])
                if d < mindist:
                    mindist = d
                    project = path[n]
                    seg0 = n
        # NOTE(review): this loop duplicates the conditional one directly
        # above (same bounds, same updates), which makes the "if project:"
        # block redundant; kept as-is to preserve behaviour.
        for n in range(0, len(path)):
            d = self.dist(path[n])
            if d < mindist:
                mindist = d
                project = path[n]
                seg0 = n
        return project, seg0
    def lateral_dist(self, path):
        """ Determine lateral distance to center of path. If
            distance > 1, the projection of the point is on the
            extension of path.
        """
        subpath = SegmentedPath()
        # need node only
        foo, seg_path_center = \
            path.center_point().project_on_path_or_endnode(path)
        project, seg_project = self.project_on_path_or_endnode(path)
        # Build the sub-path running from this point's projection to the
        # path centre, inserting the intermediate path nodes, then measure
        # its length.
        subpath.extend([project, path.center_point()])
        if seg_path_center < seg_project:
            subpath.reverse()
        for n in range(min(seg_path_center, seg_project) + 1,
                       max(seg_path_center, seg_project)):
            subpath.insert(len(subpath) - 1, path[n])
        return subpath.length()
    def segment_crossing_number(self, path, refp):
        """ Return the number of times the line between a point p and a
            reference point refp crosses a segmented path (path)
        """
        cn = 0
        for n in range(0, len(path) - 1):
            # d: whether the lines intersect at all; t, u: intersection
            # parameters along (self, refp) and the path segment.
            d, t, u = line_intersection_with_params(self, refp, path[n],
                                                    path[n + 1])
            # is intersection between self and refp?
            if d and (0 <= t <= 1):
                # is intersection within path segment?
                if 0 <= u < 1:
                    cn += 1
                # if intersecting last segment node, count only if last
                # path segment; else it would be counted twice
                elif abs(u - 1) < sys.float_info.epsilon:
                    if n == len(path) - 2:
                        cn += 1
                    # if the line tangents the node between two
                    # segments, i e does not cross the path, regard it
                    # as no intersection; thus, decrement cn by 1 now
                    # (net change will be 0)
                    elif (path[n].signed_dist_to_line(path[n + 1], refp) *
                          path[n + 2].signed_dist_to_line(path[n + 1],
                                                          refp)) > 0:
                        cn -= 1
                # Intersections beyond the path's first/last segment are
                # deliberately ignored (no-op branch).
                elif (u < 0 and n == 0) or (u > 1 and n == len(path) - 2):
                    pass
        return cn
    def dist_to_segment(self, path, n):
        """Calculate distance from the point to segment n in path.

        First determines whether the orthogonal projection of the point
        onto the segment falls between the nodes of ("on") the segment;
        if not, the distance to the closest node is used instead.

        Returns:
            (flag, distance) tuple; flag is False when the point lies
            "off" the first or last node of the whole path, True
            otherwise.  (The original docstring stated the reverse order;
            the code returns the flag first.)
        """
        u = Vec(self.x - path[n].x, self.y - path[n].y)
        v = Vec(path[n + 1].x - path[n].x, path[n + 1].y - path[n].y)
        if (u.project(v).dot(v) >= 0) and (u.project(v).dist(Point(0, 0)) <=
                                           v.dist(Point(0, 0))):
            return True, abs(self.signed_dist_to_line(path[n], path[n + 1]))
        else:  # So, not on segment.
            d0, d1 = abs(self.dist(path[n])), abs(self.dist(path[n + 1]))
            if n == 0 and d0 < d1:
                return False, d0
            elif n == len(path) - 2 and d1 < d0:
                return False, d1
            else:
                return True, min(d0, d1)
| |
The interaction modules contain
:param args: command line arguments, with which to identify the type of interaction module
:return: the label identifying the interaction module (can be an enum)
"""
if not hasattr(args, 'pooling_type') or args.pooling_type is None or not args.pooling_type or \
args.pooling_type.lower() == 'none':
args.pooling_type = '' # standard value or this argument for no pooling
return None, None # no pooling
pooling_type = args.pooling_type.lower()
if not hasattr(args, 'pooling_shape'):
shape = PoolingShape.GRID
else:
shape = PoolingShape.GRID if args.pooling_shape == 'grid' else PoolingShape.ARC
if pooling_type in Occupancy_pooling_labels:
type_enum = ShapedBasedPoolingType.OCCUPANCY
elif pooling_type in Occupancy_percentage_pooling_labels:
type_enum = ShapedBasedPoolingType.OCCUPANCY_PERCENT
elif pooling_type in Social_pooling_labels:
type_enum = ShapedBasedPoolingType.SOCIAL
elif pooling_type in Directional_pooling_labels:
type_enum = ShapedBasedPoolingType.DIRECTIONAL
elif pooling_type in Directional_polar_pooling_labels:
type_enum = ShapedBasedPoolingType.DIRECTIONAL_POLAR
elif pooling_type in Distance_pooling_labels:
type_enum = ShapedBasedPoolingType.DISTANCE
elif pooling_type in DistanceDirectional_pooling_labels:
type_enum = ShapedBasedPoolingType.DISTANCE_DIRECTIONAL
else:
raise Exception('Pooling type ' + args.pooling_type + ' is still not available')
return type_enum, shape
def add_parser_arguments_for_testing(parser):
    """
    Adds the options that the command line parser will search for, regarding testing of models
    :param parser: the argument parser
    :return: the same parser, but with the added options.
    """
    parser.add_argument('--test_dir', default='datasets_in_trajnetpp21/test', type=str,
                        help='Path to directory (or single file) where the training data is found.')
    parser.add_argument('--test_files_individually', action='store_true',
                        help='If supplied, will test each file and display results for each file individually')
    parser.add_argument('--statistics', action='store_true',
                        help='If supplied, will perform statistics with the results')
    parser.add_argument('--cv', action='store_true',
                        help='Instead of expecting an LSTM based model, will use a classical constant velocity '
                             '(--model_path will not be used)')
    parser.add_argument('--smf', action='store_true',
                        help='Instead of expecting an LSTM based model, will expect a standalone Sparse Motion Fields '
                             '(SMF) ')
    parser.add_argument('--model_path', default=os.path.join(os.getcwd(), 'saved_models', 'model.pt'), type=str,
                        help='Path to retrieve the model from')
    parser.add_argument('--model_pred_path', default=None, type=str,
                        help='Instead of providing actual models, you can provide files containing predictions of '
                             'trajectories (accepts multimodality - can have more than one prediction for each '
                             'trajectory). Currently only available for Trajnet++ format (see '
                             'https://www.aicrowd.com/challenges/trajnet-a-trajectory-forecasting-challenge for more '
                             'information on the format). The number of prediction files MUST equal the number of data '
                             'files from --test_dir.')
    parser.add_argument('--num_samples', default=1, type=int,
                        help='If the model is multimodal, number of samples to draw for each trajectory. '
                             'The metrics to be displayed will be according to the sample that has the smallest ADE')
    parser.add_argument('--eval_mode', default='min_ade', type=str,
                        choices=['min', 'min_ade', 'min_fde', 'min_both', 'max', 'max_ade', 'max_fde', 'max_both',
                                 'average', 'avg', 'mean', 'std', 'standard_deviation', 'st_dev'],
                        # FIX: added the missing space between "display" and "(can use ..." in the help text.
                        help='For the case of multimodal evaluation, provide a mode to pick the ADE and FDE to display '
                             '(can use minimum/maximum (min_ade<->min, max_ade<->max, and other variants), mean and '
                             'standard deviations')
    parser.add_argument('--kde_nll', action='store_true',
                        help='When --num_samples > 1, and with this option supplied, will compute a Kernel Density '
                             'Estimate Negative Log Likelihood (KDE-NLL). It is a more robust way of evaluating '
                             'multimodality than best ADE/FDE. A substantially large number of samples is required. '
                             'For instance, Trajnet++ used 50 samples. Note that more samples can be used '
                             '(Trajectron++ used 2000), but that will also increase overall computation time.')
    parser.add_argument('--ignore_if_kde_nll_fails', action='store_true',
                        # FIX: added the missing closing parenthesis at the end of the help text.
                        help='Ignore cases where computation of KDE-NLL fails for all epochs. This may happen due to '
                             'there being no multimodality (e.g. multimodal C.V generating samples with 0 speed.)')
    parser.add_argument('--environment_location', default=None, type=str,
                        # FIX: "100%" -> "100%%".  argparse %-formats help strings when rendering help, so a
                        # bare "%" made --help crash with "ValueError: unsupported format character".
                        # Also fixed the missing space that produced "showother".
                        help='Path to a file or directory of files containing information about the static environment '
                             '(presences of obstacles). If supplied, will also evaluate from the point of view of '
                             'collisions with the static obstacles. Note that their position is approximate, so results'
                             ' are not 100%% reliable. This will turn on flag --statistics, which in turn will show '
                             'other information besides collisions with the static environment. \n'
                             'Example of path: \'datasets_utils/environment/obstacles/biwi_eth_map.txt\'\n'
                             'Or directory: \'datasets_utils/environment/obstacles\' (assumes the existence of several '
                             'scenes - several environment files)')
    parser.add_argument('--static_collisions_neighbours', action='store_true',
                        help='Specific to Trajnet++ standard - compute collisions with environment not only for '
                             'primary pedestrians, but also for neighbours.')
    parser.add_argument('--social_metrics', action='store_true',
                        # FIX: added the missing spaces that produced "ofcolliding" and "showother".
                        help='If supplied, will also compute some social-related metrics, like percentages of '
                             'colliding pedestrians. This will turn on flag --statistics, which in turn will show '
                             'other information besides this social information. Note also that the statistics may '
                             'take some time to compute, especially for datasets with lots of pedestrians (e.g. takes '
                             'much longer for crowds_univ than biwi_eth)')
    parser.add_argument('--collision_threshold', default=0.1, type=float,
                        help='Available for --social_metrics. If two pedestrians get to a distance below this one, a '
                             'collision between those pedestrians is said to occur. For BIWI/Crowds Datasets, '
                             '0.1 results in practically no collisions in GT')
    parser.add_argument('--num_inter_pts_cols', default=2, type=int,
                        # FIX: added the missing space that produced "manyintermediate".
                        help='Available for --social_metrics. For computing collisions, this variable defines how many '
                             'intermediate points in the line segment that connects two pedestrian positions. Increase '
                             'this value to increase the accuracy of the number of collisions, at the cost of also '
                             'increasing computation time.')
    return parser
def add_parser_arguments_misc(parser):
    """
    Adds the options that the command line parser will search for, some miscellaneous parameters, like use of gpu,
    timing, etc.
    :param parser: the argument parser
    :return: the same parser, but with the added options.
    """
    parser.add_argument('--use_gpu', action='store_true',
                        help='use GPU (CUDA). For loading data on Windows OS, if you get an Access Denied or Operation '
                             'Not Supported for cuda, you must set --loader_num_workers to 0 '
                             '(you can\'t share CUDA tensors among Windows processes).')
    parser.add_argument('--gpu_num', default="0", type=str)
    parser.add_argument('--map_gpu_beginning', action='store_true',
                        # FIX: "you dataset" -> "your dataset" in the user-facing help text.
                        help='Will map all tensors (including FULL dataset) to GPU at the start of the instance, if '
                             '--use_gpu flag is supplied and CUDA is available. This option is NOT recommended if you '
                             'have low GPU memory or if your dataset is very large, since you may quickly run out of '
                             'memory.')
    parser.add_argument('--timing', action='store_true',
                        help='if specified, will display times for several parts of training')
    parser.add_argument('--load_args_from_json', type=str, default=None,
                        help='Path to json file containing args to pass. Should be an object containing the keys of '
                             'the attributes you want to change (keys that you don\'t supply will be left unchanged) '
                             'and their values according to their type (int, str, bool, list, etc.)')
    return parser
def add_parser_arguments_plotting(parser):
"""
Adds the options that te command line parser will search for, regarding configuration for plots
:param parser: the argument parser
:return: the same parser, but with the added options.
"""
# required = parser.add_argument_group('required named arguments')
parser.add_argument('--model_paths', nargs='+', type=str, # required=True,
help='List of paths to several model(s) or file(s) containing pre-computed predictions. '
'If a model does not have actual path (e.g. CV), supply the word \'none\'. '
'This list must be supplied so that the script knows where to get the predictions.')
parser.add_argument('--model_labels', nargs='+', type=str, # required=True,
help='List of labels for each of the models. It is also required. The number of labels must'
'equal to the number of paths ')
parser.add_argument('--max_trajectories', default=10, type=int,
help='Maximum number of trajectory plots to display. '
'Script will stop if this number is reached.')
parser.add_argument('--displacement_threshold', default=0, type=float,
help='Any (GT -> PAST + FUTURE) trajectory with total displacement below or equal to this '
'value will not be plotted')
parser.add_argument('--length_threshold', default=0, type=int,
help='Any primary trajectory with length below or equal to this value will not be plotted')
parser.add_argument('--distinguish_start_end', action='store_true',
help='distinguish the start and end positions in '
'the trajectories to plot')
parser.add_argument('--rotate_by', default=0, type=float,
help='Rotate the trajectories to display by a fixed angle. Note that this is not the same as '
'parameters like --random_rotate_std or --random_rotate_thresh, those are related to '
'parameters of a distribution. The angle should be supplied in radians.')
parser.add_argument('--switch_x_y', action='store_true', help='Switch x with y coordinates for all trajectories. '
'Applied after --rotate_by')
parser.add_argument('--invert_x', action='store_true',
help='For all trajectories, do x=-x. Applied after --switch_x_y')
parser.add_argument('--invert_y', action='store_true',
help='For all trajectories, do y=-y. Applied after --switch_x_y. For both biwi_eth and '
'biwi_hotel scenes, if one supplies --switch_x_y and --invert_y, the scene will be '
'oriented similar to the original video. For crowds_univ and crowds_zara, these arguments '
'need not be supplied (scenes are already oriented properly')
parser.add_argument('--plot_limits', nargs='+', type=float,
help='List with FOUR float values, to specify | |
= 1
HSM = 2
EXTERNAL = 3
ciphertext = _messages.BytesField(1)
ciphertextCrc32c = _messages.IntegerField(2)
name = _messages.StringField(3)
protectionLevel = _messages.EnumField('ProtectionLevelValueValuesEnum', 4)
verifiedAdditionalAuthenticatedDataCrc32c = _messages.BooleanField(5)
verifiedPlaintextCrc32c = _messages.BooleanField(6)
class Expr(_messages.Message):
  r"""Represents a textual expression in the Common Expression Language (CEL)
  syntax. CEL is a C-like expression language. The syntax and semantics of CEL
  are documented at https://github.com/google/cel-spec. Example (Comparison):
  title: "Summary size limit" description: "Determines if a summary is less
  than 100 chars" expression: "document.summary.size() < 100" Example
  (Equality): title: "Requestor is owner" description: "Determines if
  requestor is the document owner" expression: "document.owner ==
  request.auth.claims.email" Example (Logic): title: "Public documents"
  description: "Determine whether the document should be publicly visible"
  expression: "document.type != 'private' && document.type != 'internal'"
  Example (Data Manipulation): title: "Notification string" description:
  "Create a notification string with a timestamp." expression: "'New message
  received at ' + string(document.create_time)" The exact variables and
  functions that may be referenced within an expression are determined by the
  service that evaluates it. See the service documentation for additional
  information.

  Fields:
    description: Optional. Description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax.
    location: Optional. String indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: Optional. Title for the expression, i.e. a short string describing
      its purpose. This can be used e.g. in UIs which allow to enter the
      expression.
  """

  # NOTE(review): generated-style message class. The integer arguments are
  # protobuf field numbers (wire tags) and must not be renumbered.
  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)
class ExternalProtectionLevelOptions(_messages.Message):
  r"""ExternalProtectionLevelOptions stores a group of additional fields for
  configuring a CryptoKeyVersion that are specific to the EXTERNAL protection
  level.

  Fields:
    externalKeyUri: The URI for an external resource that this
      CryptoKeyVersion represents.
  """

  # Field number 1 is the protobuf wire tag; keep in sync with the API proto.
  externalKeyUri = _messages.StringField(1)
class GenerateRandomBytesRequest(_messages.Message):
  r"""Request message for KeyManagementService.GenerateRandomBytes.

  Enums:
    ProtectionLevelValueValuesEnum: The ProtectionLevel to use when generating
      the random data. Defaults to SOFTWARE.

  Fields:
    lengthBytes: The length in bytes of the amount of randomness to retrieve.
      Minimum 8 bytes, maximum 1024 bytes.
    protectionLevel: The ProtectionLevel to use when generating the random
      data. Defaults to SOFTWARE.
  """

  class ProtectionLevelValueValuesEnum(_messages.Enum):
    r"""The ProtectionLevel to use when generating the random data. Defaults
    to SOFTWARE.

    Values:
      PROTECTION_LEVEL_UNSPECIFIED: Not specified.
      SOFTWARE: Crypto operations are performed in software.
      HSM: Crypto operations are performed in a Hardware Security Module.
      EXTERNAL: Crypto operations are performed by an external key manager.
    """
    PROTECTION_LEVEL_UNSPECIFIED = 0
    SOFTWARE = 1
    HSM = 2
    EXTERNAL = 3

  # lengthBytes is explicitly INT32 on the wire (proto int32, not int64).
  lengthBytes = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  protectionLevel = _messages.EnumField('ProtectionLevelValueValuesEnum', 2)
class GenerateRandomBytesResponse(_messages.Message):
  r"""Response message for KeyManagementService.GenerateRandomBytes.

  Fields:
    data: The generated data.
    dataCrc32c: Integrity verification field. A CRC32C checksum of the
      returned GenerateRandomBytesResponse.data. An integrity check of
      GenerateRandomBytesResponse.data can be performed by computing the
      CRC32C checksum of GenerateRandomBytesResponse.data and comparing your
      results to this field. Discard the response in case of non-matching
      checksum values, and perform a limited number of retries. A persistent
      mismatch may indicate an issue in your computation of the CRC32C
      checksum. Note: This field is defined as int64 for reasons of
      compatibility across different languages. However, it is a non-negative
      integer, which will never exceed 2^32-1, and can be safely downconverted
      to uint32 in languages that support this type.
  """

  # Protobuf field numbers; keep in sync with the Cloud KMS API proto.
  data = _messages.BytesField(1)
  dataCrc32c = _messages.IntegerField(2)
class ImportCryptoKeyVersionRequest(_messages.Message):
  r"""Request message for KeyManagementService.ImportCryptoKeyVersion.

  Enums:
    AlgorithmValueValuesEnum: Required. The algorithm of the key being
      imported. This does not need to match the version_template of the
      CryptoKey this version imports into.

  Fields:
    algorithm: Required. The algorithm of the key being imported. This does
      not need to match the version_template of the CryptoKey this version
      imports into.
    cryptoKeyVersion: Optional. The optional name of an existing
      CryptoKeyVersion to target for an import operation. If this field is not
      present, a new CryptoKeyVersion containing the supplied key material is
      created. If this field is present, the supplied key material is imported
      into the existing CryptoKeyVersion. To import into an existing
      CryptoKeyVersion, the CryptoKeyVersion must be a child of
      ImportCryptoKeyVersionRequest.parent, have been previously created via
      ImportCryptoKeyVersion, and be in DESTROYED or IMPORT_FAILED state. The
      key material and algorithm must match the previous CryptoKeyVersion
      exactly if the CryptoKeyVersion has ever contained key material.
    importJob: Required. The name of the ImportJob that was used to wrap this
      key material.
    rsaAesWrappedKey: Wrapped key material produced with
      RSA_OAEP_3072_SHA1_AES_256 or RSA_OAEP_4096_SHA1_AES_256. This field
      contains the concatenation of two wrapped keys: 1. An ephemeral AES-256
      wrapping key wrapped with the public_key using RSAES-OAEP with SHA-1,
      MGF1 with SHA-1, and an empty label. 2. The key to be imported, wrapped
      with the ephemeral AES-256 key using AES-KWP (RFC 5649). If importing
      symmetric key material, it is expected that the unwrapped key contains
      plain bytes. If importing asymmetric key material, it is expected that
      the unwrapped key is in PKCS#8-encoded DER format (the PrivateKeyInfo
      structure from RFC 5208). This format is the same as the format produced
      by PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP.
  """

  class AlgorithmValueValuesEnum(_messages.Enum):
    r"""Required. The algorithm of the key being imported. This does not need
    to match the version_template of the CryptoKey this version imports into.

    Values:
      CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED: Not specified.
      GOOGLE_SYMMETRIC_ENCRYPTION: Creates symmetric encryption keys.
      RSA_SIGN_PSS_2048_SHA256: RSASSA-PSS 2048 bit key with a SHA256 digest.
      RSA_SIGN_PSS_3072_SHA256: RSASSA-PSS 3072 bit key with a SHA256 digest.
      RSA_SIGN_PSS_4096_SHA256: RSASSA-PSS 4096 bit key with a SHA256 digest.
      RSA_SIGN_PSS_4096_SHA512: RSASSA-PSS 4096 bit key with a SHA512 digest.
      RSA_SIGN_PKCS1_2048_SHA256: RSASSA-PKCS1-v1_5 with a 2048 bit key and a
        SHA256 digest.
      RSA_SIGN_PKCS1_3072_SHA256: RSASSA-PKCS1-v1_5 with a 3072 bit key and a
        SHA256 digest.
      RSA_SIGN_PKCS1_4096_SHA256: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
        SHA256 digest.
      RSA_SIGN_PKCS1_4096_SHA512: RSASSA-PKCS1-v1_5 with a 4096 bit key and a
        SHA512 digest.
      RSA_SIGN_RAW_PKCS1_2048: RSASSA-PKCS1-v1_5 signing without encoding,
        with a 2048 bit key.
      RSA_SIGN_RAW_PKCS1_3072: RSASSA-PKCS1-v1_5 signing without encoding,
        with a 3072 bit key.
      RSA_SIGN_RAW_PKCS1_4096: RSASSA-PKCS1-v1_5 signing without encoding,
        with a 4096 bit key.
      RSA_DECRYPT_OAEP_2048_SHA256: RSAES-OAEP 2048 bit key with a SHA256
        digest.
      RSA_DECRYPT_OAEP_3072_SHA256: RSAES-OAEP 3072 bit key with a SHA256
        digest.
      RSA_DECRYPT_OAEP_4096_SHA256: RSAES-OAEP 4096 bit key with a SHA256
        digest.
      RSA_DECRYPT_OAEP_4096_SHA512: RSAES-OAEP 4096 bit key with a SHA512
        digest.
      RSA_DECRYPT_OAEP_2048_SHA1: RSAES-OAEP 2048 bit key with a SHA1 digest.
      RSA_DECRYPT_OAEP_3072_SHA1: RSAES-OAEP 3072 bit key with a SHA1 digest.
      RSA_DECRYPT_OAEP_4096_SHA1: RSAES-OAEP 4096 bit key with a SHA1 digest.
      EC_SIGN_P256_SHA256: ECDSA on the NIST P-256 curve with a SHA256 digest.
      EC_SIGN_P384_SHA384: ECDSA on the NIST P-384 curve with a SHA384 digest.
      EC_SIGN_SECP256K1_SHA256: ECDSA on the non-NIST secp256k1 curve. This
        curve is only supported for HSM protection level.
      HMAC_SHA256: HMAC-SHA256 signing with a 256 bit key.
      EXTERNAL_SYMMETRIC_ENCRYPTION: Algorithm representing symmetric
        encryption by an external key manager.
    """
    CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED = 0
    GOOGLE_SYMMETRIC_ENCRYPTION = 1
    RSA_SIGN_PSS_2048_SHA256 = 2
    RSA_SIGN_PSS_3072_SHA256 = 3
    RSA_SIGN_PSS_4096_SHA256 = 4
    RSA_SIGN_PSS_4096_SHA512 = 5
    RSA_SIGN_PKCS1_2048_SHA256 = 6
    RSA_SIGN_PKCS1_3072_SHA256 = 7
    RSA_SIGN_PKCS1_4096_SHA256 = 8
    RSA_SIGN_PKCS1_4096_SHA512 = 9
    RSA_SIGN_RAW_PKCS1_2048 = 10
    RSA_SIGN_RAW_PKCS1_3072 = 11
    RSA_SIGN_RAW_PKCS1_4096 = 12
    RSA_DECRYPT_OAEP_2048_SHA256 = 13
    RSA_DECRYPT_OAEP_3072_SHA256 = 14
    RSA_DECRYPT_OAEP_4096_SHA256 = 15
    RSA_DECRYPT_OAEP_4096_SHA512 = 16
    RSA_DECRYPT_OAEP_2048_SHA1 = 17
    RSA_DECRYPT_OAEP_3072_SHA1 = 18
    RSA_DECRYPT_OAEP_4096_SHA1 = 19
    EC_SIGN_P256_SHA256 = 20
    EC_SIGN_P384_SHA384 = 21
    EC_SIGN_SECP256K1_SHA256 = 22
    HMAC_SHA256 = 23
    EXTERNAL_SYMMETRIC_ENCRYPTION = 24

  # Protobuf field numbers; keep in sync with the Cloud KMS API proto.
  algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
  cryptoKeyVersion = _messages.StringField(2)
  importJob = _messages.StringField(3)
  rsaAesWrappedKey = _messages.BytesField(4)
class ImportJob(_messages.Message):
r"""An ImportJob can be used to create CryptoKeys and CryptoKeyVersions
using pre-existing key material, generated outside of Cloud KMS. When an
ImportJob is created, Cloud KMS will generate a "wrapping key", which is a
public/private key pair. You use the wrapping key to encrypt (also known as
wrap) the pre-existing key material to protect it during the import process.
The nature of the wrapping key depends on the choice of import_method. When
the wrapping key generation is complete, the state will be set to ACTIVE and
the public_key can be fetched. The fetched public key can then be used to
wrap your pre-existing key material. Once the key material is wrapped, it
can be imported into a new CryptoKeyVersion in an existing CryptoKey by
calling ImportCryptoKeyVersion. | |
<reponame>willbr/uxn
# https://wiki.xxiivv.com/site/varvara.html
# https://wiki.xxiivv.com/site/uxntal.html
# https://wiki.xxiivv.com/site/uxntal_cheatsheet.html
# https://wiki.xxiivv.com/site/uxntal_reference.html
# https://wiki.xxiivv.com/site/uxntal_stacking.html
# https://wiki.xxiivv.com/site/uxntal_macros.html
from rich.console import Console
from rich.traceback import install
from uxn import *
import fileinput
import argparse
# Rich console with markup disabled so assembler output prints verbatim.
console = Console(markup=False)
# Keep a handle to the builtin print before deliberately shadowing it below.
python_print = print
print = console.print  # NOTE(review): shadows the builtin on purpose
# Pretty tracebacks with local-variable display for easier debugging.
install(show_locals=True)
# Counter for generated label names (presumably consumed by a gensym()
# helper defined later in this file -- TODO confirm).
gensym_counter = 0
class UxnRom():
    """Builds (or loads) a uxn ROM image.

    The ROM is a flat ``bytearray`` addressed through ``self.pc``.  Labels
    and forward references are recorded while tokens are assembled and the
    placeholders are patched in by :meth:`resolve` once every label address
    is known.
    """

    def __init__(self, filename=None):
        # pc starts at 0; uxntal programs conventionally move it with '|0100'.
        self.pc = 0
        self.scope = None    # current parent label, used for '&' sub-labels
        self.refs = []       # pending fixups: [label_name, rune, address]
        self.labels = {}     # label name -> absolute address
        self.debug = False
        if filename:
            self.load_rom(filename)
        else:
            self.rom = bytearray()

    def load_rom(self, filename):
        """Load an existing ROM; uxn ROMs are mapped in starting at 0x100."""
        with open(filename, 'rb') as f:
            self.rom = bytearray(0x100) + bytearray(f.read())

    def __repr__(self):
        # The zero page (first 0x100 bytes) is not part of the ROM proper.
        return 'Rom: ' + ' '.join(f'{c:02x}' for c in self.rom[0x100:])

    def write(self, token, note=''):
        """Assemble a single uxntal token, dispatching on its leading rune."""
        if note and self.debug:
            print(f"{note:6s} {token}")
        first_char = token[:1]
        if first_char == '#':  # literal hex number
            n = int(token[1:], 16)
            assert n >= 0
            assert n <= 0xffff
            if len(token) >= 4:  # '#' plus three or more digits -> short
                self.write_op('lit2')
                self.write_short(n)
            else:
                self.write_op('lit')
                self.write_byte(n)
        elif first_char == '|':  # absolute padding: move pc
            n = int(token[1:], 16)
            assert n < 0x10000
            self.pc = n
        elif first_char == '@':  # parent label define, opens a new scope
            label_name = token[1:]
            self.make_label(label_name)
            self.scope = label_name
        elif first_char == '&':  # sub-label define
            assert self.scope != None
            sub_name = token[1:]
            self.make_label(self.sub_label(sub_name))
        elif first_char == ';':  # literal address absolute
            self.make_reference(token, self.pc)
            self.write_lit_short(0xffff)  # placeholder patched by resolve()
        elif first_char == ':':  # raw address absolute
            self.make_reference(token, self.pc)
            self.write_short(0xffff)
        elif first_char == ',':  # literal address relative
            self.make_reference(token, self.pc)
            self.write_lit_byte(0xff)
        elif first_char == '.':  # zero-page address
            self.make_reference(token, self.pc)
            self.write_lit_byte(0xff)
        elif first_char == "'":  # character literal
            assert len(token) == 2
            c = token[1]
            n = ord(c)
            self.write_lit_byte(n)
        elif first_char == '"':  # raw ascii string
            for b in bytes(token[1:], 'ascii'):
                self.write_byte(b)
        elif first_char == '$':  # relative padding
            n = int(token[1:], 16)
            self.pc += n
        elif first_char == '~':
            assert False  # todo include
        elif token[:3].lower() in op_table:
            self.write_op(token)
        else:  # bare hex number
            n = int(token, 16)
            assert n >= 0
            assert n <= 0xffff
            if len(token) >= 4:
                self.write_short(n)
            else:
                self.write_byte(n)

    def sub_label(self, name):
        """Qualify *name* with the current parent scope ('parent/name')."""
        label_name = f"{self.scope}/{name}"
        return label_name

    def make_label(self, label_name):
        assert label_name not in self.labels  # labels may not be redefined
        self.labels[label_name] = self.pc

    def make_reference(self, label, addr):
        """Record a fixup for *label*; its rune decides how it is patched."""
        rune = label[0]
        if label[1] == '&':  # reference into the current scope
            ref_name = self.sub_label(label[2:])
        else:
            ref_name = label[1:]
        self.refs.append([ref_name, rune, addr])

    def write_byte(self, n):
        assert n >= 0
        assert n <= 0xff
        # Grow the backing array on demand so pc may jump forward freely.
        delta = self.pc - len(self.rom) + 1
        if delta > 0:
            self.rom += bytes(delta)
        self.rom[self.pc] = n
        self.pc += 1

    def write_signed_byte(self, n):
        """Write *n* (-128..127) as a two's-complement byte.

        BUGFIX: negatives were previously encoded as ``255 + n``, which is
        off by one from two's complement (-1 must encode as 0xff, not 0xfe);
        relative jump offsets produced by resolve() were therefore wrong.
        """
        assert -128 <= n <= 127
        self.write_byte(n & 0xff)

    def write_short(self, n):
        """Write a 16-bit value big-endian (uxn byte order)."""
        assert n >= 0
        assert n <= 0xffff
        low = n & 0x00ff
        high = n >> 8
        self.write_byte(high)
        self.write_byte(low)

    def write_lit_byte(self, n):
        self.write_op('lit')
        self.write_byte(n)

    def write_lit_short(self, n):
        self.write_op('lit2')
        self.write_short(n)

    def write_op(self, op):
        """Encode an opcode plus mode flags (k=keep, r=return, 2=short)."""
        lhs, rhs = op[:3], op[3:]
        if lhs == 'lit':  # force keep for lit
            if 'k' not in rhs:
                rhs += 'k'
        code = op_table[lhs.lower()]
        for c in rhs:
            if c == 'k':
                code = code | 0x80
            elif c == 'r':
                code = code | 0x40
            elif c == '2':
                code = code | 0x20
            else:
                raise SyntaxError(f"unknown mode: {c}")
        self.write_byte(code)

    def resolve(self):
        """Patch every recorded reference now that all labels are known."""
        for v in self.refs:
            label, rune, ref_addr = v
            try:
                label_addr = self.labels[label]
            except KeyError:
                print(self.labels)
                print(f"unknown label: {repr(label)}")
                exit(1)
            if rune == '.':  # zero-page: one absolute byte after the LIT op
                assert 0x00 <= label_addr <= 0xff
                self.pc = ref_addr + 1
                self.write_byte(label_addr)
            elif rune == ',':  # relative: signed byte, label - byte_addr - 2
                self.pc = ref_addr + 1
                delta = label_addr - self.pc - 2
                self.write_signed_byte(delta)
            elif rune == ';':  # absolute literal short after the LIT2 op
                self.pc = ref_addr + 1
                self.write_short(label_addr)
            elif rune == ':':  # raw absolute short (no opcode to skip)
                self.write_short(label_addr)
            else:
                assert False

    def write_file(self, filename):
        """Emit the assembled ROM, dropping the zero page."""
        with open(filename, 'wb') as f:
            f.write(self.rom[0x100:])

    def peek(self, short_mode, offset):
        # BUGFIX (dead code): the original selected `peek` with an if/else
        # and then immediately overwrote it with this equivalent ternary.
        peek = self.peek16 if short_mode else self.peek8
        return peek(offset)

    def peek8(self, offset):
        n = self.rom[offset]
        return n

    def peek16(self, offset):
        """Read a big-endian short at *offset*."""
        high = self.rom[offset]
        low = self.rom[offset + 1]
        n = (high << 8) + low
        return n
class Tokeniser:
    """Splits source text into tokens.

    Token kinds: double-quoted strings (quotes kept), single-quote character
    literals ('x), the bare parens '(' and ')', and runs of any other
    non-whitespace characters.  The empty string marks end of input.
    Tokens may be pushed back with push_token (LIFO).
    """

    def __init__(self, data):
        self.i = 0
        self.queued_tokens = []
        self.data = data
        self.whitespace = ' \n'
        self.chomp_whitespace()

    def chomp_whitespace(self):
        """Advance past spaces and newlines; safe to call at end of input."""
        data = self.data
        size = len(data)
        while self.i < size and data[self.i] in self.whitespace:
            self.i += 1

    def push_token(self, token):
        """Return *token* to the stream; it is handed out again first."""
        self.queued_tokens.append(token)

    def peek_token(self):
        """Look at the next token without consuming it."""
        if not self.queued_tokens:
            self.queued_tokens.append(self.read_token())
        return self.queued_tokens[-1]

    def read_token(self):
        """Consume and return the next token ('' once input is exhausted)."""
        if self.queued_tokens:
            return self.queued_tokens.pop()
        data = self.data
        start = self.i
        try:
            head = data[self.i]
            if head == '\n':
                while data[self.i] == '\n':
                    self.i += 1
            elif head == '"':
                # Scan up to and including the closing quote.
                self.i += 1
                while data[self.i] != '"':
                    self.i += 1
                self.i += 1
            elif head == "'":
                # Exactly one printable character, then a delimiter.
                self.i += 1
                assert data[self.i] not in ' \n'
                self.i += 1
                assert data[self.i] in ' \n'
            elif head in '()':
                self.i += 1
            else:
                while data[self.i] not in ' \n()':
                    self.i += 1
        except IndexError:
            pass  # hit end of input mid-token; keep what we have
        token = data[start:self.i]
        self.chomp_whitespace()
        return '\n' if token.startswith('\n') else token
class ExpressionParser:
    """Rewrites parenthesised infix expressions into postfix token order.

    Wraps a Tokeniser; reordered tokens are buffered in ``queued_tokens``
    and handed out FIFO by :meth:`read_token`.
    """

    def __init__(self, data):
        self.queued_tokens = []
        self.tokeniser = Tokeniser(data)
        # Raw (unreordered) token access, delegated to the tokeniser.
        self.read_raw = self.tokeniser.read_token
        self.peek_raw = self.tokeniser.peek_token

    def read_token(self):
        """Return the next postfix-ordered token ('' at end of input)."""
        if self.queued_tokens:
            t = self.queued_tokens.pop(0)
            return t
        t = self.peek_raw()
        if t == '':
            return ''
        else:
            self.parse_expr()
        if self.queued_tokens:
            new_t = self.queued_tokens.pop(0)
        else:
            new_t = ''
        return new_t

    def parse_expr(self):
        """Consume one expression from the raw stream into queued_tokens.

        A bare token is queued as-is.  '( a op b op c ... )' is queued as
        'a b op c op ...'; every operator within one pair of parentheses
        must be the same token.
        """
        stack = []
        t = self.read_raw()
        if t == '':
            assert False  # expression expected, got end of input
        elif t == '(':
            stack.append(t)
        elif t == ')':
            assert False  # unbalanced close paren
        else:
            # Not a parenthesised expression: pass the token straight through.
            self.queued_tokens.append(t)
            return
        i = 0      # position inside the expr: even = operand, odd = operator
        op = None  # the single operator allowed in this expression
        while True:
            p = self.peek_raw()
            if p == '(':
                # Nested expression: it queues its own postfix tokens first.
                self.parse_expr()
            t = self.read_raw()
            if t == '':
                assert False  # unterminated expression
            elif t == ')':
                # Closing right after a nested operand (even i): emit the op.
                if i % 2 == 0 and op:
                    self.queued_tokens.append(op)
                tos = stack.pop()
                assert tos == '('
                # FIX: removed dead assignment `prev = stack[-1] ...` -- the
                # value was never used before the assert below.
                assert not stack
                return
            elif i == 0:
                self.queued_tokens.append(t)
            elif i == 1:
                op = t
            elif i % 2:
                assert t == op  # mixed operators are not supported
            else:
                self.queued_tokens.append(t)
                self.queued_tokens.append(op)
            i += 1
def assemble(rom, data):
# tok = Tokeniser(data)
# while True:
# t = tok.read_token()
# if t == '':
# break
# print(t)
# return
xp = ExpressionParser(data)
# while True:
# t = xp.read_token()
# if t == '':
# break
# print(t)
inline_words = {}
words = []
queue = []
def next_word():
if queue:
return queue.pop(0)
return xp.read_token()
def peek_word():
if queue:
return queue[0]
t = xp.read_token()
queue.append(t)
return t
def read_block(skip_open=False, open_marker='{', close_maker='}'):
depth = 0
body = []
if not skip_open:
open_word = next_word()
assert open_word == open_marker
while True:
w = next_word()
if w == open_marker:
body.append(w)
depth += 1
elif w == close_maker:
depth -= 1
if depth == -1:
break
else:
body.append(w)
elif w == '':
break
else:
body.append(w)
return body
def assemble_label(prefix, name):
nonlocal queue
p = peek_word()
if p == '{':
body = read_block()
queue = body + queue + ['label', f"end-{name}"]
# print(f"{name} {body = }")
cmd = f'{prefix}{name}'
rom.write(cmd, 'label')
while True:
w = next_word()
if w == '':
break;
first_char = w[0]
print(repr(w))
if w == '(':
assert False
comment = read_block(True, '(', ')')
assert False
elif w in '{}[]':
pass
elif w == 'inline' or first_char == '%':
if first_char == '%':
name = w[1:]
else:
name = next_word()
body = read_block()
inline_words[name] = body
elif w == 'origin':
offset = next_word()
cmd = '|' + offset
rom.write(cmd, 'set pc')
elif w == 'comment':
body = read_block()
elif w == "data":
name = next_word()
body = read_block()
cmd = '@' + name
rom.write(cmd, 'data label')
for b in body:
n = int(b, 16)
rom.write_byte(n)
elif w == "loop":
pw = peek_word()
if pw == '{':
start_marker = gensym('loop-start')
end_marker = gensym('loop-end')
else:
start_marker = next_word()
end_marker = start_marker + '-end'
cmd = '&' + start_marker
body = [cmd]
body += read_block()
cmd = ';&' + start_marker
body += [cmd, 'jmp2']
cmd = '&' + end_marker
body += [cmd]
queue = body + queue
elif w == "if":
p = peek_word()
true_clause = read_block()
assert true_clause
p = peek_word()
true_marker = gensym('if-true')
end_marker = gensym('if-end')
if p == 'else':
body = [';' + true_marker, 'jcn2']
_ = next_word()
else_clause = read_block()
body += | |
<reponame>sighill/shade_app
# -*- coding: utf-8 -*-
# voca.py
# Python 3.4.3
# Django 1.9
# Script rassemblant des fonctions python3 pour modifier les fichiers
# des dossiers ###_raw
#####################################################################
# README !
# Les fonctions suivantes sont là pour être appelées au sein d'un
# script personnalisé pour chaque mission.
# Pour importer facilement ces fonctions, CF premier snippet
# en bas de ce script.
# Implémenter un log permettant de suivre les étapes et le traitement
# de la donnée. Le garder en global pour qu'il accumule les logs
# de chaque fonction.
# Des snippets de code utile sont dispos en fin de document
# RAPPEL SUBLIMETEXT :
# Pour wrapper tout le code faire ctrl+a ctrl+k ctrl+1
#####################################################################
# Accumulated run log: appended to by AddLog() and written to the ###_log
# file by OutFileCreate(); shared by every function in this module.
log = ''
# Prefix used when building the optional header line in OutFileCreate().
headerConstant = 'HEADER;'
#####################################################################
def WorkPath():
    """
    WorkPath 2016.04.09
    Using this function lets scripts work with relative paths between
    scripts and folders.
    It automatically determines where the script is being run:
    either on a PC (local work) or on the server through the Django shell.

    NOTE(review): not implemented yet -- the body is empty and the
    function currently returns None.
    """
    # TODO: replace voca.py's relative-folder snippet with this function,
    # which should auto-detect the working directory!
def AddLog(log_level, str_to_add):
    """
    AddLog 2016.04.08
    Append one entry to the global log string, decorated according to
    *log_level*, and also add the HTML-friendly layout expected by the
    Django template.

    log_level is either 'title', 'subtitle' or 'corpus'; any other value
    falls back to 'corpus' formatting after appending a warning line.
    Returns the updated global log.
    """
    global log
    separator = '#'
    # If title, big separator and str_to_add
    if log_level == 'title':
        log = log + separator * 70 + '\n' + str_to_add + '\n'
    # If subtitle, 4 space indent, medium separator, and str_to_add
    elif log_level == 'subtitle':
        log = log + ' ' + separator * 35 + '\n' + ' ' + str_to_add + '\n'
    # If corpus, 8 spaces indent and str_to_add
    elif log_level == 'corpus':
        log = log + ' ' + str_to_add + '\n'
    # If typo
    else:
        # BUGFIX: the warning previously had no trailing newline, so it ran
        # into the fallback entry on the same line; every other branch ends
        # its entry with '\n'.
        log = log + 'WARNING : bad log_level, using corpus mode' + '\n'
        log = log + ' ' + str_to_add + '\n'
    return log
#####################################################################
def OutFileCreate(out_path, raw_filename, ref_list, header):
    """
    OutFileCreate 2016.04.08
    Create and fill the out and log files in the right place.
    See shade/README.md part II.2 for details.
    Advice: call this function at the end of a procedure.

    Expects four arguments:
        out_path     -- absolute path to the out folder (string)
        raw_filename -- name of the ###_raw file (string)
        ref_list     -- refined text (list of strings)
        header       -- header content (string; empty for no header)
    """
    global log
    file_id = raw_filename[:3]
    # Build the optional header line.
    if header == '':
        headerComplete = ''
        AddLog('corpus', 'Fichier créé sans header')
    else:
        headerComplete = headerConstant + header + '\n'
        AddLog('corpus', 'Le header sera: {}'.format(headerComplete))
    # ###_out file -- note that mode 'w+' overwrites any existing file.
    AddLog('corpus', 'Création du fichier {}_out'.format(file_id))
    with open(out_path + file_id + '_out', 'w+') as ofi_out:
        # Only write the header line when one was actually built.
        if headerComplete != '':
            ofi_out.write(headerComplete)
        ofi_out.write('\n'.join(ref_list))
    # ###_log file -- same overwrite behaviour.
    AddLog('corpus', 'Création du fichier {}_log'.format(file_id))
    with open(out_path + file_id + '_log', 'w+') as ofi_log:
        ofi_log.write(log)
#####################################################################
def StringFormatter(raw_str):
    """
    StringFormatter 2016.04.08
    Normalise any name string to the primus app standard: a capital at
    the start of the name and, for compound names, at the start of each
    part ('FoO-BAr' -> 'Foo-Bar').
    """
    # TODO: add a special-character converter, e.g. ` -> '
    global log
    formatted = raw_str.title()
    AddLog('corpus', '{} --> {}.'.format(raw_str, formatted))
    return formatted
#####################################################################
def StrValidator(list):  # WIP NE PAS UTILISER
    """
    StrValidator 2016.04.09
    Quickly validate each entry of a list by hand.
    Each line is prompted and an input is expected:
        empty input   -> line validated
        'c' or 'C'    -> manual correction (prompted, then confirmed)
        anything else -> line eliminated
    Returns the list of validated/corrected lines.
    """
    # TODO: correction flow -- the while-not-valid loop is crude and
    # fragile; improve it.
    global log
    out_list = []
    counter_valid = 0
    counter_corr = 0
    counter_eliminated = 0
    print('StrValidator - inputs possibles : \n Vide : string validé.\n c : correction manuelle.\n Tout autre input : string éliminé')
    for line in list:
        key_input = input(line + ' : ')
        # Empty input keeps the line as-is.
        if not key_input:
            out_list.append(line)
            counter_valid += 1
        # 'c': ask for a correction, confirm it, then keep it.
        elif key_input in ['c', 'C']:
            line_corr_valid_ok = False
            while not line_corr_valid_ok:
                line_corr = input('Correction de {}: '.format(line))
                line_corr_valid = input('Validez vous {} ? o/n : '.format(line_corr))
                if line_corr_valid in ['o', 'O', 'y', 'Y']:
                    # BUGFIX: append the corrected string itself; the code
                    # previously appended line_corr_valid, i.e. the 'o'/'y'
                    # confirmation answer.
                    out_list.append(line_corr)
                    line_corr_valid_ok = True
                    counter_corr += 1
                else:
                    continue
        # Any other input drops the line.
        else:
            print('String éliminé.')
            counter_eliminated += 1
    AddLog('corpus', 'Lignes validées : {}'.format(counter_valid))
    AddLog('corpus', 'Lignes corrigées : {}'.format(counter_corr))
    AddLog('corpus', 'Lignes éliminées : {}'.format(counter_eliminated))
    return out_list
#####################################################################
def OdditiesFinder(raw_list):
    '''
    OdditiesFinder 2016.04.12
    Scan the input strings for characters that are not acceptable in the
    primus db.  Each line is broken into characters and every character
    is checked against the accepted alphabet; on a problem the user is
    prompted for a replacement (answer empty to drop the character).
    Returns the list of cleaned lines.
    '''
    # TODO: test the function with a multi-character insert when one
    # letter is replaced by several letters.
    global log
    # Accepted alphabet: ascii letters, apostrophe and space.
    acceptable_char = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
    'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
    'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
    'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
    'W', 'X', 'Y', 'Z', '\'' , ' ' ]
    ref_line_list = []
    for line in raw_list:
        # Work on the line as a mutable list of characters.
        letter_list = list(line)
        for position, letter in enumerate(letter_list):
            if letter in acceptable_char:
                continue
            replacement_letter = input('Bizarrerie trouvée : \' {} \' dans \' {} \'. Remplacer par : '.format(letter , line))
            letter_list[position] = replacement_letter
            AddLog('corpus' , '{} : Modification de la lettre : {} en {}'.format(line , letter , replacement_letter))
        # Rebuild the string from the (possibly patched) character list.
        ref_line_list.append(''.join(letter_list))
    return ref_line_list
#####################################################################
# SNIPPETS DE CODE UTILES
#####################################################################
'''
# Ajout du répertoire de travail local pour travailler en système
# de fichiers relatifs et importer les fonctions voca facilement
import sys
sys.path.insert(0 , 'D:/Projets/shade_django/apis/')
from voca import AddLog , StringFormatter , OutFileCreate
'''
'''
# créer une liste comportant toutes les lignes du fichier
line_list = ofi.read().splitlines()
# read() importe les lignes
# splitlines() supprime le caractère de retour à la ligne
'''
# Ouvrir un fichier et en tirer des lignes sans le caractère
# spécial \n qui signifie un retour à la ligne :
# ofi = open('path_to_file'+'file_name' , 'option')
# CONSEIL : WRAPPE CE COMMENT ! option est soit :
# r Opens a file for reading only. The file pointer is placed
# at the beginning of the file. This is the default mode.
# r+ Opens a file for both reading and writing. The file pointer
# placed at the beginning of the file.
# w Opens a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.