| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
daedric/buck | third-party/py/unittest2/unittest2/test/test_program.py | 121 | 7252 | from cStringIO import StringIO
import sys
import unittest2
hasInstallHandler = hasattr(unittest2, 'installHandler')
class Test_TestProgram(unittest2.TestCase):
# Horrible white box test
def testNoExit(self):
result = object()
test = object()
class FakeRunner(object):
def run(self, test):
self.test = test
return result
runner = FakeRunner()
oldParseArgs = unittest2.TestProgram.parseArgs
def restoreParseArgs():
unittest2.TestProgram.parseArgs = oldParseArgs
unittest2.TestProgram.parseArgs = lambda *args: None
self.addCleanup(restoreParseArgs)
def removeTest():
del unittest2.TestProgram.test
unittest2.TestProgram.test = test
self.addCleanup(removeTest)
program = unittest2.TestProgram(testRunner=runner, exit=False, verbosity=2)
self.assertEqual(program.result, result)
self.assertEqual(runner.test, test)
self.assertEqual(program.verbosity, 2)
class FooBar(unittest2.TestCase):
def testPass(self):
assert True
def testFail(self):
assert False
class FooBarLoader(unittest2.TestLoader):
"""Test loader that returns a suite containing FooBar."""
def loadTestsFromModule(self, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def test_NonExit(self):
program = unittest2.main(exit=False,
argv=["foobar"],
testRunner=unittest2.TextTestRunner(stream=StringIO()),
testLoader=self.FooBarLoader())
self.assertTrue(hasattr(program, 'result'))
def test_Exit(self):
self.assertRaises(
SystemExit,
unittest2.main,
argv=["foobar"],
testRunner=unittest2.TextTestRunner(stream=StringIO()),
exit=True,
testLoader=self.FooBarLoader())
def test_ExitAsDefault(self):
self.assertRaises(
SystemExit,
unittest2.main,
argv=["foobar"],
testRunner=unittest2.TextTestRunner(stream=StringIO()),
testLoader=self.FooBarLoader())
class InitialisableProgram(unittest2.TestProgram):
exit = False
result = None
verbosity = 1
defaultTest = None
testRunner = None
testLoader = unittest2.defaultTestLoader
progName = 'test'
test = 'test'
def __init__(self, *args):
pass
RESULT = object()
class FakeRunner(object):
initArgs = None
test = None
raiseError = False
def __init__(self, **kwargs):
FakeRunner.initArgs = kwargs
if FakeRunner.raiseError:
FakeRunner.raiseError = False
raise TypeError
def run(self, test):
FakeRunner.test = test
return RESULT
class TestCommandLineArgs(unittest2.TestCase):
def setUp(self):
self.program = InitialisableProgram()
self.program.createTests = lambda: None
FakeRunner.initArgs = None
FakeRunner.test = None
FakeRunner.raiseError = False
def testHelpAndUnknown(self):
program = self.program
def usageExit(msg=None):
program.msg = msg
program.exit = True
program.usageExit = usageExit
for opt in '-h', '-H', '--help':
program.exit = False
program.parseArgs([None, opt])
self.assertTrue(program.exit)
self.assertIsNone(program.msg)
program.parseArgs([None, '-$'])
self.assertTrue(program.exit)
self.assertIsNotNone(program.msg)
def testVerbosity(self):
program = self.program
for opt in '-q', '--quiet':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 0)
for opt in '-v', '--verbose':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 2)
def testBufferCatchFailfast(self):
program = self.program
for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
('catch', 'catchbreak')):
if attr == 'catchbreak' and not hasInstallHandler:
continue
short_opt = '-%s' % arg[0]
long_opt = '--%s' % arg
for opt in short_opt, long_opt:
setattr(program, attr, None)
program.parseArgs([None, opt])
self.assertTrue(getattr(program, attr))
for opt in short_opt, long_opt:
not_none = object()
setattr(program, attr, not_none)
program.parseArgs([None, opt])
self.assertEqual(getattr(program, attr), not_none)
def testRunTestsRunnerClass(self):
program = self.program
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
'failfast': 'failfast',
'buffer': 'buffer'})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsRunnerInstance(self):
program = self.program
program.testRunner = FakeRunner()
FakeRunner.initArgs = None
program.runTests()
# A new FakeRunner should not have been instantiated
self.assertIsNone(FakeRunner.initArgs)
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsOldRunnerClass(self):
program = self.program
FakeRunner.raiseError = True
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.test = 'test'
program.runTests()
# If initialising raises a type error it should be retried
# without the new keyword arguments
self.assertEqual(FakeRunner.initArgs, {})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testCatchBreakInstallsHandler(self):
module = sys.modules['unittest2.main']
original = module.installHandler
def restore():
module.installHandler = original
self.addCleanup(restore)
self.installed = False
def fakeInstallHandler():
self.installed = True
module.installHandler = fakeInstallHandler
program = self.program
program.catchbreak = True
program.testRunner = FakeRunner
program.runTests()
self.assertTrue(self.installed)
if __name__ == '__main__':
unittest2.main()
| apache-2.0 |
johngian/remo | vendor-local/lib/python/tablib/packages/unicodecsv/__init__.py | 52 | 3434 | # -*- coding: utf-8 -*-
import csv
from csv import *

# http://semver.org/
VERSION = (0, 8, 0)
__version__ = ".".join(map(str, VERSION))


def _stringify(s, encoding):
    if type(s) == unicode:
        return s.encode(encoding)
    elif isinstance(s, (int, float)):
        pass  # let csv.QUOTE_NONNUMERIC do its thing.
    elif type(s) != str:
        s = str(s)
    return s


def _stringify_list(l, encoding):
    return [_stringify(s, encoding) for s in l]


class UnicodeWriter(object):
    """
    >>> import unicodecsv
    >>> from cStringIO import StringIO
    >>> f = StringIO()
    >>> w = unicodecsv.writer(f, encoding='utf-8')
    >>> w.writerow((u'é', u'ñ'))
    >>> f.seek(0)
    >>> r = unicodecsv.reader(f, encoding='utf-8')
    >>> row = r.next()
    >>> print row[0], row[1]
    é ñ
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        self.dialect = dialect
        self.encoding = encoding
        self.writer = csv.writer(f, dialect=dialect, **kwds)

    def writerow(self, row):
        self.writer.writerow(_stringify_list(row, self.encoding))

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
writer = UnicodeWriter


class UnicodeReader(object):
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        self.reader = csv.reader(f, dialect=dialect, **kwds)
        self.encoding = encoding

    def next(self):
        row = self.reader.next()
        return [unicode(s, self.encoding) for s in row]

    def __iter__(self):
        return self
reader = UnicodeReader


class DictWriter(csv.DictWriter):
    """
    >>> from cStringIO import StringIO
    >>> f = StringIO()
    >>> w = DictWriter(f, ['a', 'b'], restval=u'î')
    >>> w.writerow({'a':'1'})
    >>> w.writerow({'a':'1', 'b':u'ø'})
    >>> w.writerow({'a':u'é'})
    >>> f.seek(0)
    >>> r = DictReader(f, fieldnames=['a'], restkey='r')
    >>> r.next() == {'a': u'1', 'r': [u'î']}
    True
    >>> r.next() == {'a': u'1', 'r': [u'ø']}
    True
    >>> r.next() == {'a': u'é', 'r': [u'î']}
    True
    """
    def __init__(self, csvfile, fieldnames, restval='', extrasaction='raise',
                 dialect='excel', encoding='utf-8', *args, **kwds):
        self.fieldnames = fieldnames
        self.encoding = encoding
        self.restval = restval
        self.writer = csv.DictWriter(csvfile, fieldnames, restval,
                                     extrasaction, dialect, *args, **kwds)

    def writerow(self, d):
        for fieldname in self.fieldnames:
            if fieldname in d:
                d[fieldname] = _stringify(d[fieldname], self.encoding)
            else:
                d[fieldname] = _stringify(self.restval, self.encoding)
        self.writer.writerow(d)


class DictReader(csv.DictReader):
    def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
                 dialect='excel', encoding='utf-8', *args, **kwds):
        self.restkey = restkey
        self.encoding = encoding
        self.reader = csv.DictReader(csvfile, fieldnames, restkey, restval,
                                     dialect, *args, **kwds)

    def next(self):
        d = self.reader.next()
        for k, v in d.items():
            if k == self.restkey:
                rest = v
                if rest:
                    d[self.restkey] = [unicode(s, self.encoding) for s in rest]
            else:
                if v is not None:
                    d[k] = unicode(v, self.encoding)
        return d
| bsd-3-clause |
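A minimal round-trip sketch for the `unicodecsv` row above (not part of the original file). Python 2 only, since the module relies on `unicode`, `.next()` and `cStringIO`; it assumes the module is importable as `unicodecsv`:

```python
# -*- coding: utf-8 -*-
from cStringIO import StringIO
import unicodecsv  # assumption: the module above is installed under this name

f = StringIO()
w = unicodecsv.writer(f, encoding='utf-8')
w.writerow([u'café', 42])   # unicode cells are encoded to UTF-8 bytes on write

f.seek(0)
r = unicodecsv.reader(f, encoding='utf-8')
print r.next()              # -> [u'café', u'42']: cells decoded back to unicode
```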
LogicalDash/kivy | kivy/tests/perf_test_textinput.py | 21 | 6625 | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.resources import resource_find
from kivy.clock import Clock
import timeit
Builder.load_string('''
<PerfApp>:
value: 0
but: but.__self__
slider: slider
text_input: text_input
BoxLayout:
orientation: 'vertical'
TextInput:
id: text_input
BoxLayout:
orientation: 'vertical'
size_hint: 1, .2
BoxLayout:
Button:
id: but
text: 'Start Test'
on_release: root.start_test() if self.text == 'Start Test'\
else ''
Slider:
id: slider
min: 0
max: 100
value: root.value
''')
class PerfApp(App, FloatLayout):
def build(self):
return self
def __init__(self, **kwargs):
super(PerfApp, self).__init__(**kwargs)
self.tests = []
tests = (self.load_large_text, self.stress_insert,
self.stress_del, self.stress_selection)
for test in tests:
but = type(self.but)(text=test.__name__)
self.but.parent.add_widget(but)
but.test = test
self.tests.append(but)
self.test_done = True
def load_large_text(self, *largs):
print('loading uix/textinput.py....')
self.test_done = False
fd = open(resource_find('uix/textinput.py'), 'r')
print('putting text in textinput')
def load_text(*l):
self.text_input.text = fd.read()
t = timeit.Timer(load_text)
ttk = t.timeit(1)
fd.close()
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024, 'MB')
print('------------------------------------------')
print('Loaded', len(self.text_input._lines), 'lines', ttk, 'secs')
print('------------------------------------------')
self.test_done = True
def stress_del(self, *largs):
self.test_done = False
text_input = self.text_input
self.lt = len_text = len(text_input.text)
target = len_text - (210 * 9)
self.tot_time = 0
ev = None
def dlt(*l):
if len(text_input.text) <= target:
ev.cancel()
print('Done!')
m_len = len(text_input._lines)
print('deleted 210 characters 9 times')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
1024, 'MB')
print('total lines in text input:', m_len)
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
text_input.select_text(self.lt - 220, self.lt - 10)
text_input.delete_selection()
self.lt -= 210
text_input.scroll_y -= 100
self.tot_time += l[0]
ev()
ev = Clock.create_trigger(dlt)
ev()
def stress_insert(self, *largs):
self.test_done = False
text_input = self.text_input
text_input.select_all()
text_input._copy(text_input.selection_text)
text_input.cursor = text_input.get_cursor_from_index(
text_input.selection_to)
len_text = len(text_input._lines)
self.tot_time = 0
ev = None
def pste(*l):
if len(text_input._lines) >= (len_text) * 9:
ev.cancel()
print('Done!')
m_len = len(text_input._lines)
print('pasted', len_text, 'lines',
round((m_len - len_text) / len_text), 'times')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
1024, 'MB')
print('total lines in text input:', m_len)
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
self.tot_time += l[0]
text_input._paste()
ev()
ev = Clock.create_trigger(pste)
ev()
def stress_selection(self, *largs):
self.test_done = False
text_input = self.text_input
self.tot_time = 0
old_selection_from = text_input.selection_from - 210
ev = None
def pste(*l):
if text_input.selection_from >= old_selection_from:
ev.cancel()
print('Done!')
import resource
print('mem usage after test')
print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss /
1024, 'MB')
print('--------------------------------------')
print('total time elapsed:', self.tot_time)
print('--------------------------------------')
self.test_done = True
return
text_input.select_text(text_input.selection_from - 1,
text_input.selection_to)
ev()
ev = Clock.create_trigger(pste)
ev()
def start_test(self, *largs):
self.but.text = 'test started'
self.slider.max = len(self.tests)
ev = None
def test(*l):
if self.test_done:
try:
but = self.tests[int(self.slider.value)]
self.slider.value += 1
but.state = 'down'
print('=====================')
print('Test:', but.text)
print('=====================')
but.test(but)
except IndexError:
for but in self.tests:
but.state = 'normal'
self.but.text = 'Start Test'
self.slider.value = 0
print('===================')
print('All Tests Completed')
print('===================')
ev.cancel()
ev = Clock.schedule_interval(test, 1)
if __name__ in ('__main__', ):
PerfApp().run()
| mit |
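The timing pattern used by `load_large_text` above — wrapping a zero-argument callable in `timeit.Timer` and running it exactly once — works standalone as well. A small sketch (the payload function is a made-up stand-in):

```python
import timeit

def expensive():
    # hypothetical stand-in for the work being measured
    sum(i * i for i in range(100000))

# Same pattern as load_large_text: time a single run of a callable.
t = timeit.Timer(expensive)
elapsed = t.timeit(1)   # number=1 -> one execution, returns seconds as a float
print('took %.4f s' % elapsed)
```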
Xykon/pycom-micropython-sigfox | esp32/tools/pypic.py | 2 | 5197 | #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import errno
import serial
import struct
import time
__version__ = '0.9.2'
CMD_PEEK = (0x0)
CMD_POKE = (0x01)
CMD_MAGIC = (0x02)
CMD_HW_VER = (0x10)
CMD_FW_VER = (0x11)
CMD_PROD_ID = (0x12)
CMD_SETUP_SLEEP = (0x20)
CMD_GO_SLEEP = (0x21)
CMD_CALIBRATE = (0x22)
CMD_BAUD_CHANGE = (0x30)
CMD_DFU = (0x31)
ANSELA_ADDR = (0x18C)
ANSELB_ADDR = (0x18D)
ANSELC_ADDR = (0x18E)
ADCON0_ADDR = (0x9D)
ADCON1_ADDR = (0x9E)
IOCAP_ADDR = (0x391)
IOCAN_ADDR = (0x392)
_ADCON0_CHS_POSN = (0x02)
_ADCON0_ADON_MASK = (0x01)
_ADCON1_ADCS_POSN = (0x04)
_ADCON0_GO_nDONE_MASK = (0x02)
ADRESL_ADDR = (0x09B)
ADRESH_ADDR = (0x09C)
TRISC_ADDR = (0x08E)
PORTA_ADDR = (0x00C)
PORTC_ADDR = (0x00E)
WPUA_ADDR = (0x20C)
PCON_ADDR = (0x096)
STATUS_ADDR = (0x083)
# helper functions
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def log(*args):
print(' '.join(str(a) for a in args))
def error(msg):
eprint('error:', msg)
def exit_with_error(code, msg):
error(msg)
sys.exit(code)
def warn(msg):
eprint('warning:', msg)
class Pypic:
def __init__(self, port):
# we need bytesize to be 5 bits in order for the PIC to process the commands
self.serial = serial.Serial(port, baudrate=115200, bytesize=serial.FIVEBITS, timeout=0.25)
self.detected = False
try:
if self.read_fw_version() < 6:
raise ValueError('PIC firmware out of date')
else:
self.detected = True
except Exception:
pass
def _write(self, data, read=True):
self.serial.write(data)
if read:
r_data = self.serial.read(2)
if not r_data:
raise Exception('Timeout while waiting for Rx data')
return struct.unpack('B', r_data[0])[0]
def _send_cmd(self, cmd):
return self._write(bytearray([cmd]))
def read_hw_version(self):
return self._send_cmd(CMD_HW_VER)
def read_fw_version(self):
return self._send_cmd(CMD_FW_VER)
def read_product_id(self):
return self._send_cmd(CMD_PROD_ID)
def peek_memory(self, addr):
return self._write(bytearray([CMD_PEEK, addr & 0xFF, (addr >> 8) & 0xFF]))
def poke_memory(self, addr, value):
self._write(bytearray([CMD_POKE, addr & 0xFF, (addr >> 8) & 0xFF, value & 0xFF]), False)
def magic_write_read(self, addr, _and=0xFF, _or=0, _xor=0):
return self._write(bytearray([CMD_MAGIC, addr & 0xFF, (addr >> 8) & 0xFF, _and & 0xFF, _or & 0xFF, _xor & 0xFF]))
def magic_write(self, addr, _and=0xFF, _or=0, _xor=0):
self._write(bytearray([CMD_MAGIC, addr & 0xFF, (addr >> 8) & 0xFF, _and & 0xFF, _or & 0xFF, _xor & 0xFF]), False)
def toggle_bits_in_memory(self, addr, bits):
self.magic_write(addr, _xor=bits)
def mask_bits_in_memory(self, addr, mask):
self.magic_write(addr, _and=mask)
def set_bits_in_memory(self, addr, bits):
self.magic_write(addr, _or=bits)
def reset_pycom_module(self):
# make RC5 an output
self.mask_bits_in_memory(TRISC_ADDR, ~(1 << 5))
# drive RC5 low
self.mask_bits_in_memory(PORTC_ADDR, ~(1 << 5))
time.sleep(0.2)
# drive RC5 high
self.set_bits_in_memory(PORTC_ADDR, 1 << 5)
time.sleep(0.1)
# make RC5 an input
self.set_bits_in_memory(TRISC_ADDR, 1 << 5)
def enter_pycom_programming_mode(self):
# make RC0 an output
self.mask_bits_in_memory(TRISC_ADDR, ~(1 << 0))
# set RC0 low
self.mask_bits_in_memory(PORTC_ADDR, ~(1 << 0))
# perform reset
self.reset_pycom_module()
# We should keep RC0 low at this point in case someone
# presses the reset button before the firmware upgrade
# as this is mandatory for the regular expansion board
def exit_pycom_programming_mode(self):
# make RC0 an input
# This will prevent issues with the RGB LED
self.set_bits_in_memory(TRISC_ADDR, 1 << 0)
self.reset_pycom_module()
def isdetected(self):
return self.detected
def close(self):
self.serial.close()
def main(args):
parser = argparse.ArgumentParser(description='Sends internal commands to put the Pycom module in programming mode')
parser.add_argument('-p', '--port', metavar='PORT', help='the serial port used to communicate with the PIC')
parser.add_argument('--enter', action='store_true', help='enter programming mode')
parser.add_argument('--exit', action='store_true', help='exit programming mode')
args = parser.parse_args()
if not args.port:
exit_with_error(1, 'no serial port specified')
if (args.enter and args.exit) or (not args.enter and not args.exit):
exit_with_error(1, 'invalid action requested')
pic = Pypic(args.port)
if pic.isdetected():
if args.enter:
pic.enter_pycom_programming_mode()
elif args.exit:
pic.exit_pycom_programming_mode()
pic.close()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| mit |
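The `magic_write` family in `pypic.py` above implements read-modify-write on PIC registers by sending AND/OR/XOR masks. A pure-Python illustration of the bit semantics behind `mask_bits_in_memory`, `set_bits_in_memory` and `toggle_bits_in_memory` (no serial port needed; the AND-then-OR-then-XOR application order is an assumption, not confirmed by the firmware):

```python
def magic(value, _and=0xFF, _or=0x00, _xor=0x00):
    # assumed firmware behavior: new = ((old & AND) | OR) ^ XOR
    return ((value & _and) | _or) ^ _xor

reg = 0b00100100
reg = magic(reg, _and=~(1 << 5) & 0xFF)  # mask_bits: force bit 5 low
reg = magic(reg, _or=1 << 0)             # set_bits: force bit 0 high
reg = magic(reg, _xor=1 << 2)            # toggle_bits: flip bit 2
print(bin(reg))
```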
ahuarte47/QGIS | tests/src/python/test_qgsvaliditychecks.py | 45 | 5681 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for validity checks
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '03/12/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsApplication,
QgsAbstractValidityCheck,
QgsValidityCheckRegistry,
QgsValidityCheckResult,
QgsValidityCheckContext,
QgsFeedback,
check)
from qgis.testing import start_app, unittest
app = start_app()
class TestCheck(QgsAbstractValidityCheck):
def __init__(self, id, name, type, results):
super().__init__()
self._name = name
self._id = id
self._type = type
self._results = results
def create(self):
return TestCheck(self._id, self._name, self._type, self._results)
def id(self):
return self._id
def checkType(self):
return self._type
def runCheck(self, _, __):
return self._results
class TestContext(QgsValidityCheckContext):
def type(self):
return 0
# register some checks using the decorator syntax
@check.register(type=QgsAbstractValidityCheck.TypeLayoutCheck)
def my_check(context, feedback):
assert context
@check.register(type=QgsAbstractValidityCheck.TypeLayoutCheck)
def my_check2(context, feedback):
res = QgsValidityCheckResult()
res.type = QgsValidityCheckResult.Warning
res.title = 'test'
res.detailedDescription = 'blah blah'
return [res]
class TestQgsValidityChecks(unittest.TestCase):
def testAppRegistry(self):
# ensure there is an application instance
self.assertIsNotNone(QgsApplication.validityCheckRegistry())
def testDecorator(self):
# test that checks registered using the decorator have worked
self.assertEqual(len(QgsApplication.validityCheckRegistry().checks()), 2)
context = TestContext()
feedback = QgsFeedback()
res = QgsApplication.validityCheckRegistry().runChecks(QgsAbstractValidityCheck.TypeLayoutCheck, context, feedback)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].title, 'test')
def testRegistry(self):
registry = QgsValidityCheckRegistry()
self.assertFalse(registry.checks())
# add a new check
c1 = TestCheck('c1', 'my check', 1, [])
registry.addCheck(c1)
self.assertEqual(registry.checks(), [c1])
c2 = TestCheck('c2', 'my check2', 1, [])
registry.addCheck(c2)
self.assertEqual(registry.checks(), [c1, c2])
registry.removeCheck(None)
c3 = TestCheck('c3', 'my check3', 1, [])
# not in registry yet
registry.removeCheck(c3)
registry.removeCheck(c1)
self.assertEqual(registry.checks(), [c2])
registry.removeCheck(c2)
self.assertFalse(registry.checks())
def testRegistryChecks(self):
registry = QgsValidityCheckRegistry()
c1 = TestCheck('c1', 'my check', 1, [])
registry.addCheck(c1)
c2 = TestCheck('c2', 'my check2', 2, [])
registry.addCheck(c2)
c3 = TestCheck('c3', 'my check3', 1, [])
registry.addCheck(c3)
self.assertFalse(registry.checks(0))
self.assertEqual(registry.checks(1), [c1, c3])
self.assertEqual(registry.checks(2), [c2])
def testRunChecks(self):
registry = QgsValidityCheckRegistry()
res1 = QgsValidityCheckResult()
res1.type = QgsValidityCheckResult.Warning
res1.title = 'test'
res1.detailedDescription = 'blah blah'
c1 = TestCheck('c1', 'my check', 1, [res1])
registry.addCheck(c1)
res2 = QgsValidityCheckResult()
res2.type = QgsValidityCheckResult.Critical
res2.title = 'test2'
res2.detailedDescription = 'blah blah2'
c2 = TestCheck('c2', 'my check2', 2, [res2])
registry.addCheck(c2)
res3 = QgsValidityCheckResult()
res3.type = QgsValidityCheckResult.Warning
res3.title = 'test3'
res3.detailedDescription = 'blah blah3'
res4 = QgsValidityCheckResult()
res4.type = QgsValidityCheckResult.Warning
res4.title = 'test4'
res4.detailedDescription = 'blah blah4'
c3 = TestCheck('c3', 'my check3', 1, [res3, res4])
registry.addCheck(c3)
context = TestContext()
feedback = QgsFeedback()
self.assertFalse(registry.runChecks(0, context, feedback))
self.assertEqual([r.type for r in registry.runChecks(1, context, feedback)],
[QgsValidityCheckResult.Warning, QgsValidityCheckResult.Warning,
QgsValidityCheckResult.Warning])
self.assertEqual([r.title for r in registry.runChecks(1, context, feedback)], ['test', 'test3', 'test4'])
self.assertEqual([r.detailedDescription for r in registry.runChecks(1, context, feedback)],
['blah blah', 'blah blah3', 'blah blah4'])
self.assertEqual([r.type for r in registry.runChecks(2, context, feedback)], [QgsValidityCheckResult.Critical])
self.assertEqual([r.title for r in registry.runChecks(2, context, feedback)], ['test2'])
self.assertEqual([r.detailedDescription for r in registry.runChecks(2, context, feedback)], ['blah blah2'])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
rrooij/youtube-dl | youtube_dl/extractor/bleacherreport.py | 39 | 4164 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .amp import AMPIE
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
)
class BleacherReportIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
_TESTS = [{
'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
'md5': 'a3ffc3dc73afdbc2010f02d98f990f20',
'info_dict': {
'id': '2496438',
'ext': 'mp4',
'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
'uploader_id': 3992341,
'description': 'CFB, ACC, Florida State',
'timestamp': 1434380212,
'upload_date': '20150615',
'uploader': 'Team Stream Now ',
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
'md5': '6a5cd403418c7b01719248ca97fb0692',
'info_dict': {
'id': '2586817',
'ext': 'webm',
'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo',
'timestamp': 1446839961,
'uploader': 'Sean Fay',
'description': 'md5:b1601e2314c4d8eec23b6eafe086a757',
'uploader_id': 6466954,
'upload_date': '20151011',
},
'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
article_id = self._match_id(url)
article_data = self._download_json('http://api.bleacherreport.com/api/v1/articles/%s' % article_id, article_id)['article']
thumbnails = []
primary_photo = article_data.get('primaryPhoto')
if primary_photo:
thumbnails = [{
'url': primary_photo['url'],
'width': primary_photo.get('width'),
'height': primary_photo.get('height'),
}]
info = {
'_type': 'url_transparent',
'id': article_id,
'title': article_data['title'],
'uploader': article_data.get('author', {}).get('name'),
'uploader_id': article_data.get('authorId'),
'timestamp': parse_iso8601(article_data.get('createdAt')),
'thumbnails': thumbnails,
'comment_count': int_or_none(article_data.get('commentsCount')),
'view_count': int_or_none(article_data.get('hitCount')),
}
video = article_data.get('video')
if video:
video_type = video['type']
if video_type == 'cms.bleacherreport.com':
info['url'] = 'http://bleacherreport.com/video_embed?id=%s' % video['id']
elif video_type == 'ooyala.com':
info['url'] = 'ooyala:%s' % video['id']
elif video_type == 'youtube.com':
info['url'] = video['id']
elif video_type == 'vine.co':
info['url'] = 'https://vine.co/v/%s' % video['id']
else:
info['url'] = video_type + video['id']
return info
else:
raise ExtractorError('no video in the article', expected=True)
class BleacherReportCMSIE(AMPIE):
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36})'
_TESTS = [{
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
'md5': '2e4b0a997f9228ffa31fada5c53d1ed1',
'info_dict': {
'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
'ext': 'flv',
'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._extract_feed_info('http://cms.bleacherreport.com/media/items/%s/akamai.json' % video_id)
info['id'] = video_id
return info
| unlicense |
su2code/SU2 | SU2_PY/discrete_adjoint.py | 1 | 7832 | #!/usr/bin/env python
## \file discrete_adjoint.py
# \brief Python script for doing the discrete adjoint computation using the SU2 suite.
# \author F. Palacios, T. Economon, T. Lukaczyk
# \version 7.1.1 "Blackbird"
#
# SU2 Project Website: https://su2code.github.io
#
# The SU2 Project is maintained by the SU2 Foundation
# (http://su2foundation.org)
#
# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import os, sys, copy
from optparse import OptionParser
sys.path.append(os.environ['SU2_RUN'])
import SU2
# -------------------------------------------------------------------
# Main
# -------------------------------------------------------------------
def main():
# Command Line Options
parser=OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="read config from FILE", metavar="FILE")
parser.add_option("-n", "--partitions", dest="partitions", default=1,
help="number of PARTITIONS", metavar="PARTITIONS")
parser.add_option("-s", "--step", dest="step", default=1E-4,
help="DOT finite difference STEP", metavar="STEP")
parser.add_option("-v", "--validate", dest="validate", default="False",
help="Validate the gradient using direct diff. mode", metavar="VALIDATION")
parser.add_option("-z", "--zones", dest="nzones", default="1",
help="Number of Zones", metavar="ZONES")
parser.add_option("-m", "--mode", dest="mode", default="all",
help="Determine the calculation mode \n <all> : compute primal & adjoint problem & gradient (DEFAULT) \n <adj> : compute adjoint (with primal restart) & gradient \n <grad>: compute gradient (with primal and adjoint restarts)", metavar="MODE")
(options, args)=parser.parse_args()
options.partitions = int( options.partitions )
options.step = float( options.step )
options.validate = options.validate.upper() == 'TRUE'
options.nzones = int( options.nzones )
if options.mode != "all" and options.mode != "adj" and options.mode != "grad":
sys.exit('Infeasible input for --mode. Use --help for more information')
discrete_adjoint( options.filename ,
options.partitions ,
options.step ,
options.nzones ,
options.mode)
#: def main()
# -------------------------------------------------------------------
# Discrete Adjoint
# -------------------------------------------------------------------
def discrete_adjoint( filename ,
partitions = 0 ,
step = 1e-4 ,
nzones = 1,
mode = "all"):
# Config
config = SU2.io.Config(filename)
config.NUMBER_PART = partitions
config.NZONES = int( nzones )
# State
state = SU2.io.State()
config['GRADIENT_METHOD'] = 'DISCRETE_ADJOINT'
# check for existing files
if mode == "grad":
config.RESTART_SOL = 'YES'
state.find_files(config)
else:
state.FILES.MESH = config.MESH_FILENAME
# Tranfer Convergence Data, if necessary
konfig = copy.deepcopy(config)
# Direct Solution
if mode == "all":
info = SU2.run.direct(config)
state.update(info)
# Update konfig
konfig = copy.deepcopy(config)
if konfig.get('WINDOW_CAUCHY_CRIT', 'NO') == 'YES' and konfig.TIME_MARCHING != 'NO':
konfig['TIME_ITER'] = info.WND_CAUCHY_DATA['TIME_ITER']
konfig['ITER_AVERAGE_OBJ'] = info.WND_CAUCHY_DATA['ITER_AVERAGE_OBJ']
konfig['UNST_ADJOINT_ITER'] = info.WND_CAUCHY_DATA['UNST_ADJOINT_ITER']
SU2.io.restart2solution(konfig,state)
# Adjoint Solution
# Run all-at-once
if mode == "all" or mode == "adj":
restart_sol_activated = False
if konfig.get('TIME_DOMAIN','NO') == 'YES' and konfig.get('RESTART_SOL','NO') == 'YES':
restart_sol_activated = True
original_time_iter = konfig['TIME_ITER']
konfig['TIME_ITER'] = konfig['TIME_ITER'] - int(konfig['RESTART_ITER'])
konfig.RESTART_SOL = 'NO'
info = SU2.run.adjoint(konfig)
state.update(info)
# Workaround, since expandTime relies on UNST_ADJOINT_ITER to determine number of solution files.
if restart_sol_activated:
konfig['UNST_ADJOINT_ITER'] = original_time_iter - int(konfig['RESTART_ITER'])
SU2.io.restart2solution(konfig,state)
# reset changed time-iter values for the remaining program to original values
# Gradient Projection
info = SU2.run.projection(konfig,step)
state.update(info)
return state
#: continuous_adjoint()
# -------------------------------------------------------------------
# Alternate Formulation
# -------------------------------------------------------------------
def discrete_design( filename ,
partitions = 0 ,
compute = True ,
step = 1e-4 ,
validation = False):
# TODO:
# step
# Config
config = SU2.io.Config(filename)
config.NUMBER_PART = partitions
config['GRADIENT_METHOD'] = 'DISCRETE_ADJOINT'
ADJ_NAME = config.OBJECTIVE_FUNCTION
# State
state = SU2.io.State()
state_directdiff = SU2.io.State()
grads_directdiff = []
# if validation:
# state_directdiff.find_files(config)
# konfig = copy.deepcopy(config)
# konfig['DIRECT_DIFF'] = "DESIGN_VARIABLES"
# grad_directdiff = SU2.eval.gradients.directdiff(konfig,state_directdiff)
# state['FILES']['DIRECT'] = 'DIRECTDIFF/' + state_directdiff['FILES']['DIRECT']
# state['FUNCTIONS'] = state_directdiff['FUNCTIONS']
# check for existing files
if any([not compute, validation]) :
state.find_files(config)
else:
state.FILES.MESH = config.MESH_FILENAME
# Adjoint Gradient
grads = SU2.eval.grad( ADJ_NAME, config['GRADIENT_METHOD'], config, state )
# if validation:
# Definition_DV = config['DEFINITION_DV']
# n_dv = len(Definition_DV['KIND'])
# grads_dd = grad_directdiff[ADJ_NAME]
# print("Validation Summary")
# print("--------------------------")
# print("VARIABLE " + "DISCRETE ADJOINT" + " DIRECT DIFFERENTIATION" + " ERROR (%)")
# for idv in range(n_dv):
# if abs(grads[idv]) > abs(grads_dd[idv]):
# this_err = abs(grads[idv]/grads_dd[idv])
# else:
# this_err = abs(grads_dd[idv]/grads[idv])
# print(str(idv) + " " + str(grads[idv]) + " " + str(grads_dd[idv]) + " " + str((this_err-1)*100) + ' %')
return state
# -------------------------------------------------------------------
# Run Main Program
# -------------------------------------------------------------------
# this is only accessed if running from command prompt
if __name__ == '__main__':
main()
| lgpl-2.1 |
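A hedged usage sketch for the script above (not from the repo): driving the same workflow programmatically instead of via the command line. It assumes the SU2 Python tools are on `sys.path` and that `config.cfg` is a placeholder for a valid SU2 configuration file:

```python
import discrete_adjoint  # assumption: the script above is importable as a module

state = discrete_adjoint.discrete_adjoint(
    'config.cfg',     # -f/--file
    partitions=4,     # -n/--partitions
    step=1e-4,        # -s/--step
    nzones=1,         # -z/--zones
    mode='all')       # -m/--mode: 'all', 'adj' or 'grad'
```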
wuhengzhi/chromium-crosswalk | tools/memory_inspector/memory_inspector/classification/rules.py | 109 | 5039 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This module defines the core structure of the classification rules.

This module does NOT specify how the rules filter the data: that responsibility
belongs to the concrete classifiers, which have to override the Rule class
herein defined and know how to do the math.

This module, instead, defines the format of the rules and the way they are
encoded and loaded (in a python-style dictionary file).

Rules are organized in a tree, where the root is always represented by a
'Total' node, and the leaves are arbitrarily defined by the user, according to
the following principles:
- Order of sibling rules matters: what is caught by a rule will not be caught
  by the next ones, but it is propagated to its children rules if any.
- Every non-leaf node X gets an implicit extra child named X-other. This
  catch-all child catches everything (within the parent rule scope) that is
  not caught by the other siblings. This is to guarantee that, when doing the
  math (the aggregation), at any level, the sum of the values in the leaves
  matches the value of their parent.

The format of a rule dictionary is the following:
[
  {
    'name': 'Name of the rule',
    'filter-X': 'The embedder will know how to interpret this value and will
                 use it to filter the data',
    'filter-Y': 'Idem',
    'children': [
      {
        'name': 'Name of the sub-rule 1',
        ... and so on recursively,
      },
    ]
  },
]

And a typical resulting rule tree looks like this:

                        +----------------------+
                        |        Total         |
                        |----------------------|
       +----------------+      Match all.      +----------------+
       |                +----------+-----------+                |
       |                           |                            |
 +-----v-----+               +-----v-----+               +------v----+
 |    Foo    |               |    Bar    |               |Total-other|
 |-----------|               |-----------|               |-----------|
 |File: foo* |           +---+File: bar* +-----+         | Match all |
 +-----------+           |   +-----------+     |         +-----------+
                         |                     |
                 +-------v-----+         +-----v-----+
                 | Bar::Coffee |         | Bar-other |
                 |-------------|         |-----------|
                 |File: bar*cof|         | Match all |
                 +-------------+         +-----------+
"""

import ast


def Load(content, rule_builder):
  """Construct a rule tree from a python-style dict representation.

  Args:
    content: a string containing the dict (i.e. content of the rule file).
    rule_builder: a method which takes two arguments (rule_name, filters_dict)
        and returns a subclass of |Rule|. |filters_dict| is a dict of the keys
        (filter-foo, filter-bar in the example above) for the rule node.
  """
  rules_dict = ast.literal_eval(content)
  root = Rule('Total')
  _MakeRuleNodeFromDictNode(root, rules_dict, rule_builder)
  return root


class Rule(object):
  """An abstract class representing a rule node in the rules tree.

  Embedders must override the Match method when deriving this class.
  """

  def __init__(self, name):
    self.name = name
    self.children = []

  def Match(self, _):  # pylint: disable=R0201
    """The rationale of this default implementation is modeling the root
    ('Total') and the catch-all (*-other) rules that every |RuleTree| must
    have, regardless of the embedder-specific children rules. This is to
    guarantee that the totals match at any level of the tree.
    """
    return True

  def AppendChild(self, child_rule):
    assert(isinstance(child_rule, Rule))
    duplicates = filter(lambda x: x.name == child_rule.name, self.children)
    assert(not duplicates), 'Duplicate rule ' + child_rule.name
    self.children.append(child_rule)


def _MakeRuleNodeFromDictNode(rule_node, dict_nodes, rule_builder):
  """Recursive rule tree builder for traversing the rule dict."""
  for dict_node in dict_nodes:
    assert('name' in dict_node)
    # Extract the filter keys (e.g., mmap-file, mmap-prot) that will be
    # passed to the |rule_builder|.
    filter_keys = set(dict_node.keys()) - set(('name', 'children'))
    filters = dict((k, dict_node[k]) for k in filter_keys)
    child_rule = rule_builder(dict_node['name'], filters)
    rule_node.AppendChild(child_rule)
    dict_children = dict_node.get('children', {})
    _MakeRuleNodeFromDictNode(child_rule, dict_children, rule_builder)

  # If the rule_node isn't a leaf, add the 'name-other' catch-all sibling to
  # catch all the entries that matched this node but none of its children.
  if len(rule_node.children):
    rule_node.AppendChild(Rule(rule_node.name + '-other'))
| bsd-3-clause |
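A hedged example of feeding `Load()` from the module above: a two-level rule file plus a trivial `Rule` subclass that matches on a made-up `filter-file` prefix key (it assumes `Load` and `Rule` are imported from the module):

```python
RULES = """
[
  {
    'name': 'Foo',
    'filter-file': 'foo',
    'children': [
      {'name': 'FooSub', 'filter-file': 'foo/sub'},
    ],
  },
]
"""

class PrefixRule(Rule):  # Rule comes from the module above
    def __init__(self, name, filters):
        super(PrefixRule, self).__init__(name)
        self.prefix = filters.get('filter-file', '')

    def Match(self, entry):
        return entry.startswith(self.prefix)

root = Load(RULES, PrefixRule)
# root is 'Total'; its children are [Foo, Total-other], and Foo's children
# are [FooSub, Foo-other], thanks to the implicit catch-all nodes.
assert [c.name for c in root.children] == ['Foo', 'Total-other']
```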
lifei/flask-admin | flask_admin/contrib/geoa/fields.py | 1 | 2904 | import warnings

import geoalchemy2
from flask import current_app
from shapely.geometry import shape
from sqlalchemy import func

from flask_admin.form import JSONField
from .widgets import LeafletWidget


class GeoJSONField(JSONField):
    widget = LeafletWidget()

    def __init__(self, label=None, validators=None, geometry_type="GEOMETRY",
                 srid=-1, session=None, **kwargs):
        super(GeoJSONField, self).__init__(label, validators, **kwargs)
        self.web_srid = 4326
        self.srid = srid
        if self.srid == -1:
            self.transform_srid = self.web_srid
        else:
            self.transform_srid = self.srid
        self.geometry_type = geometry_type.upper()
        self.session = session

    def _flip_coordinates(self, other_func):
        if current_app.config.get('MAPBOX_FIX_COORDINATES_ORDER'):
            return func.ST_FlipCoordinates(other_func)
        else:
            warnings.warn(
                'Consider setting the Flask config option '
                'MAPBOX_FIX_COORDINATES_ORDER as the current implementation '
                'passes lng/lat coordinates in the wrong order to '
                'Leaflet. Without this setting any coordinates saved will '
                'have flipped coordinates in your database. '
                'Please note that this will become the standard behavior in '
                'the next major version of Flask-Admin.'
            )
            return other_func

    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        if type(self.data) is geoalchemy2.elements.WKBElement:
            if self.srid == -1:
                return self.session.scalar(
                    func.ST_AsGeoJson(
                        self._flip_coordinates(self.data)
                    )
                )
            else:
                return self.session.scalar(
                    func.ST_AsGeoJson(
                        self._flip_coordinates(
                            func.ST_Transform(self.data, self.web_srid)
                        )
                    )
                )
        else:
            return ''

    def process_formdata(self, valuelist):
        super(GeoJSONField, self).process_formdata(valuelist)
        if str(self.data) == '':
            self.data = None
        if self.data is not None:
            web_shape = self.session.scalar(
                func.ST_AsText(
                    self._flip_coordinates(
                        func.ST_Transform(
                            func.ST_GeomFromText(
                                shape(self.data).wkt,
                                self.web_srid
                            ),
                            self.transform_srid
                        )
                    )
                )
            )
            self.data = 'SRID=' + str(self.srid) + ';' + str(web_shape)
| bsd-3-clause |
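A short sketch of opting in to the lng/lat fix that `_flip_coordinates()` above warns about; everything except the config key name is standard Flask boilerplate:

```python
from flask import Flask

app = Flask(__name__)
# Silences the warning and wraps geometries in ST_FlipCoordinates,
# so lng/lat are handed to Leaflet in the expected order.
app.config['MAPBOX_FIX_COORDINATES_ORDER'] = True
```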
jbittel/django-mama-cas | mama_cas/utils.py | 1 | 2745 | import logging
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.urls import reverse, NoReverseMatch
from django.http import HttpResponseRedirect
from django.utils.encoding import force_bytes

from mama_cas.services import service_allowed


logger = logging.getLogger(__name__)


def add_query_params(url, params):
    """
    Inject additional query parameters into an existing URL. If
    parameters already exist with the same name, they will be
    overwritten. Parameters with empty values are ignored. Return
    the modified URL as a string.
    """
    def encode(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)
    params = dict([(encode(k), encode(v)) for k, v in params.items() if v])
    parts = list(urlparse(url))
    query = dict(parse_qsl(parts[4]))
    query.update(params)
    parts[4] = urlencode(query)
    return urlunparse(parts)


def is_scheme_https(url):
    """
    Test the scheme of the parameter URL to see if it is HTTPS. If
    it is HTTPS return ``True``, otherwise return ``False``.
    """
    return 'https' == urlparse(url).scheme


def clean_service_url(url):
    """
    Return only the scheme, hostname (with optional port) and path
    components of the parameter URL.
    """
    parts = urlparse(url)
    return urlunparse((parts.scheme, parts.netloc, parts.path, '', '', ''))


def match_service(service1, service2):
    """
    Compare two service URLs. Return ``True`` if the scheme, hostname,
    optional port and path match.
    """
    s1, s2 = urlparse(service1), urlparse(service2)
    try:
        return (s1.scheme, s1.netloc, s1.path) == (s2.scheme, s2.netloc, s2.path)
    except ValueError:
        return False


def redirect(to, *args, **kwargs):
    """
    Similar to the Django ``redirect`` shortcut but with altered
    functionality. If an optional ``params`` argument is provided, the
    dictionary items will be injected as query parameters on the
    redirection URL.
    """
    params = kwargs.pop('params', {})
    try:
        to = reverse(to, args=args, kwargs=kwargs)
    except NoReverseMatch:
        if '/' not in to and '.' not in to:
            to = reverse('cas_login')
        elif not service_allowed(to):
            raise PermissionDenied()
    if params:
        to = add_query_params(to, params)
    logger.debug("Redirecting to %s" % to)
    return HttpResponseRedirect(to)


def to_bool(str):
    """
    Converts a given string to a boolean value. Leading and trailing
    whitespace is ignored, so strings of whitespace are evaluated as
    ``False``.
    """
    if str:
        return bool(str.strip())
    return False
| bsd-3-clause |
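A behavior sketch for `add_query_params()` above (not part of the original file). Django settings must be configured first because the helper encodes with `settings.DEFAULT_CHARSET`, and it assumes `mama_cas` is installed and importable:

```python
import django.conf
django.conf.settings.configure()  # minimal stand-in settings

from mama_cas.utils import add_query_params  # assumption: mama_cas installed

url = add_query_params('https://example.com/login?next=/home',
                       {'ticket': 'ST-12345', 'empty': ''})
print(url)  # e.g. 'https://example.com/login?next=%2Fhome&ticket=ST-12345'
# Existing params are kept (re-encoded by urlencode), same-named params are
# overwritten, and params with empty values are dropped.
```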
jessekl/flixr | venv/lib/python2.7/site-packages/lxml/html/html5parser.py | 70 | 6498 | """
An interface to html5lib that mimics the lxml.html interface.
"""
import sys
import string
from html5lib import HTMLParser as _HTMLParser
from html5lib.treebuilders.etree_lxml import TreeBuilder
from lxml import etree
from lxml.html import _contains_block_level_tag, XHTML_NAMESPACE, Element
# python3 compatibility
try:
_strings = basestring
except NameError:
_strings = (bytes, str)
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
class HTMLParser(_HTMLParser):
"""An html5lib HTML parser with lxml as tree."""
def __init__(self, strict=False, **kwargs):
_HTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)
try:
from html5lib import XHTMLParser as _XHTMLParser
except ImportError:
pass
else:
class XHTMLParser(_XHTMLParser):
"""An html5lib XHTML Parser with lxml as tree."""
def __init__(self, strict=False, **kwargs):
_XHTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)
xhtml_parser = XHTMLParser()
def _find_tag(tree, tag):
elem = tree.find(tag)
if elem is not None:
return elem
return tree.find('{%s}%s' % (XHTML_NAMESPACE, tag))
def document_fromstring(html, guess_charset=True, parser=None):
"""Parse a whole document into a string."""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
return parser.parse(html, useChardet=guess_charset).getroot()
def fragments_fromstring(html, no_leading_text=False,
guess_charset=False, parser=None):
"""Parses several HTML elements, returning a list of elements.
The first item in the list may be a string. If no_leading_text is true,
then it will be an error if there is leading text, and it will always be
a list of only elements.
If `guess_charset` is `True` and the text was not unicode but a
bytestring, the `chardet` library will perform charset guessing on the
string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
children = parser.parseFragment(html, 'div', useChardet=guess_charset)
if children and isinstance(children[0], _strings):
if no_leading_text:
if children[0].strip():
raise etree.ParserError('There is leading text: %r' %
children[0])
del children[0]
return children
def fragment_fromstring(html, create_parent=False,
guess_charset=False, parser=None):
"""Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If create_parent is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In
this case, leading or trailing text is allowed.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, guess_charset=guess_charset, parser=parser,
no_leading_text=not accept_leading_text)
if create_parent:
if not isinstance(create_parent, _strings):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], _strings):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError('Multiple elements found')
result = elements[0]
if result.tail and result.tail.strip():
raise etree.ParserError('Element followed by text: %r' % result.tail)
result.tail = None
return result
def fromstring(html, guess_charset=True, parser=None):
"""Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
base_url will set the document's base_url attribute (and the tree's docinfo.URL)
"""
if not isinstance(html, _strings):
raise TypeError('string required')
doc = document_fromstring(html, parser=parser,
guess_charset=guess_charset)
# document starts with doctype or <html>, full document!
start = html[:50].lstrip().lower()
if start.startswith('<html') or start.startswith('<!doctype'):
return doc
head = _find_tag(doc, 'head')
# if the head is not empty we have a full document
if len(head):
return doc
body = _find_tag(doc, 'body')
# The body has just one element, so it was probably a single
# element passed in
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
def parse(filename_url_or_file, guess_charset=True, parser=None):
"""Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
"""
if parser is None:
parser = html_parser
if not isinstance(filename_url_or_file, _strings):
fp = filename_url_or_file
elif _looks_like_url(filename_url_or_file):
fp = urlopen(filename_url_or_file)
else:
fp = open(filename_url_or_file, 'rb')
return parser.parse(fp, useChardet=guess_charset)
def _looks_like_url(str):
scheme = urlparse(str)[0]
if not scheme:
return False
elif (sys.platform == 'win32' and
scheme in string.ascii_letters
and len(scheme) == 1):
# looks like a 'normal' absolute path
return False
else:
return True
html_parser = HTMLParser()
| mit |
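A short sketch of the `fromstring()` heuristics implemented above: full documents come back as `<html>`, single elements as themselves, and mixed fragments wrapped in a fake container. It assumes `lxml.html.html5parser` is importable; `guess_charset=False` is used because charset guessing only applies to bytes input:

```python
from lxml.html.html5parser import fromstring

doc = fromstring('<!doctype html><html><body><p>hi</p></body></html>',
                 guess_charset=False)
print(doc.tag)    # 'html' -> treated as a full document

elem = fromstring('<p>hi</p>', guess_charset=False)
print(elem.tag)   # 'p' -> lone body child is passed through

frag = fromstring('<p>a</p><p>b</p>', guess_charset=False)
print(frag.tag)   # 'div' -> block-level content gets a <div> container
```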
vaginessa/pyload | module/PyFile.py | 40 | 8284 | #!/usr/bin/env python
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
@author: mkaay
"""
from module.PullEvents import UpdateEvent
from module.utils import formatSize, lock
from time import sleep, time
from threading import RLock
statusMap = {
"finished": 0,
"offline": 1,
"online": 2,
"queued": 3,
"skipped": 4,
"waiting": 5,
"temp. offline": 6,
"starting": 7,
"failed": 8,
"aborted": 9,
"decrypting": 10,
"custom": 11,
"downloading": 12,
"processing": 13,
"unknown": 14,
}
def setSize(self, value):
self._size = int(value)
class PyFile(object):
"""
Represents a file object at runtime
"""
__slots__ = ("m", "id", "url", "name", "size", "_size", "status", "pluginname", "packageid",
"error", "order", "lock", "plugin", "waitUntil", "active", "abort", "statusname",
"reconnected", "progress", "maxprogress", "pluginmodule", "pluginclass")
def __init__(self, manager, id, url, name, size, status, error, pluginname, package, order):
self.m = manager
self.id = int(id)
self.url = url
self.name = name
self.size = size
self.status = status
self.pluginname = pluginname
self.packageid = package #should not be used, use package() instead
self.error = error
self.order = order
# database information ends here
self.lock = RLock()
self.plugin = None
#self.download = None
self.waitUntil = 0 # time() + time to wait
# status attributes
self.active = False #obsolete?
self.abort = False
self.reconnected = False
self.statusname = None
self.progress = 0
self.maxprogress = 100
self.m.cache[int(id)] = self
# will convert all sizes to ints
size = property(lambda self: self._size, setSize)
def __repr__(self):
return "PyFile %s: %s@%s" % (self.id, self.name, self.pluginname)
@lock
def initPlugin(self):
""" inits plugin instance """
if not self.plugin:
self.pluginmodule = self.m.core.pluginManager.getPlugin(self.pluginname)
self.pluginclass = getattr(self.pluginmodule, self.m.core.pluginManager.getPluginName(self.pluginname))
self.plugin = self.pluginclass(self)
@lock
def hasPlugin(self):
"""Thread safe way to determine this file has initialized plugin attribute
:return:
"""
return hasattr(self, "plugin") and self.plugin
def package(self):
""" return package instance"""
return self.m.getPackage(self.packageid)
def setStatus(self, status):
self.status = statusMap[status]
self.sync() #@TODO needed aslong no better job approving exists
def setCustomStatus(self, msg, status="processing"):
self.statusname = msg
self.setStatus(status)
def getStatusName(self):
if self.status not in (13, 14) or not self.statusname:
return self.m.statusMsg[self.status]
else:
return self.statusname
def hasStatus(self, status):
return statusMap[status] == self.status
def sync(self):
"""sync PyFile instance with database"""
self.m.updateLink(self)
@lock
def release(self):
"""sync and remove from cache"""
# file has valid package
if self.packageid > 0:
self.sync()
if hasattr(self, "plugin") and self.plugin:
self.plugin.clean()
del self.plugin
self.m.releaseLink(self.id)
def delete(self):
"""delete pyfile from database"""
self.m.deleteLink(self.id)
def toDict(self):
"""return dict with all information for interface"""
return self.toDbDict()
def toDbDict(self):
"""return data as dict for databse
format:
{
id: {'url': url, 'name': name ... }
}
"""
return {
self.id: {
'id': self.id,
'url': self.url,
'name': self.name,
'plugin': self.pluginname,
'size': self.getSize(),
'format_size': self.formatSize(),
'status': self.status,
'statusmsg': self.getStatusName(),
'package': self.packageid,
'error': self.error,
'order': self.order
}
}
def abortDownload(self):
"""abort pyfile if possible"""
while self.id in self.m.core.threadManager.processingIds():
self.abort = True
if self.plugin and self.plugin.req:
self.plugin.req.abortDownloads()
sleep(0.1)
self.abort = False
if self.hasPlugin() and self.plugin.req:
self.plugin.req.abortDownloads()
self.release()
def finishIfDone(self):
"""set status to finish and release file if every thread is finished with it"""
if self.id in self.m.core.threadManager.processingIds():
return False
self.setStatus("finished")
self.release()
self.m.checkAllLinksFinished()
return True
def checkIfProcessed(self):
self.m.checkAllLinksProcessed(self.id)
def formatWait(self):
""" formats and return wait time in humanreadable format """
seconds = self.waitUntil - time()
if seconds < 0: return "00:00:00"
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return "%.2i:%.2i:%.2i" % (hours, minutes, seconds)
def formatSize(self):
""" formats size to readable format """
return formatSize(self.getSize())
def formatETA(self):
""" formats eta to readable format """
seconds = self.getETA()
if seconds < 0: return "00:00:00"
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
return "%.2i:%.2i:%.2i" % (hours, minutes, seconds)
def getSpeed(self):
""" calculates speed """
try:
return self.plugin.req.speed
except:
return 0
def getETA(self):
""" gets established time of arrival"""
try:
return self.getBytesLeft() / self.getSpeed()
except:
return 0
def getBytesLeft(self):
""" gets bytes left """
try:
return self.plugin.req.size - self.plugin.req.arrived
except:
return 0
def getPercent(self):
""" get % of download """
if self.status == 12:
try:
return self.plugin.req.percent
except:
return 0
else:
return self.progress
def getSize(self):
""" get size of download """
try:
if self.plugin.req.size:
return self.plugin.req.size
else:
return self.size
except:
return self.size
def notifyChange(self):
e = UpdateEvent("file", self.id, "collector" if not self.package().queue else "queue")
self.m.core.pullManager.addEvent(e)
def setProgress(self, value):
if not value == self.progress:
self.progress = value
self.notifyChange()
| gpl-3.0 |
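A standalone sketch of the `divmod` arithmetic used by `formatWait()` and `formatETA()` in the row above to render a second count as hh:mm:ss:

```python
seconds = 4525
hours, seconds = divmod(seconds, 3600)   # 4525 s -> 1 h, remainder 925 s
minutes, seconds = divmod(seconds, 60)   # 925 s  -> 15 min, remainder 25 s
print("%.2i:%.2i:%.2i" % (hours, minutes, seconds))  # 01:15:25
```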
ktrumble/prmai | lib/userdb.py | 1 | 4718 | from mongo import db
import postsdb
# For update_twitter
import tweepy
import settings
import urllib2
"""
{
'user': { 'id_str':'', 'auth_type': '', 'username': '', 'fullname': '', 'screen_name': '', 'profile_image_url_https': '', 'profile_image_url': '', 'is_blacklisted': False }
'access_token': { 'secret': '', 'user_id': '', 'screen_name': '', 'key': '' },
'email_address': '',
'role': '',
'tags':[]
}
"""
#db.user_info.ensure_index('user.screen_name')
def get_all():
return list(db.user_info.find({}))
def get_user_by_id_str(id_str):
return db.user_info.find_one({'user.id_str': id_str})
def get_user_by_screen_name(screen_name):
return db.user_info.find_one({'user.screen_name': screen_name})
def get_user_by_email(email_address):
return db.user_info.find_one({'email_address':email_address})
def get_disqus_users():
return db.user_info.find({'disqus': { '$exists': 'true' }})
def get_newsletter_recipients():
return list(db.user_info.find({'wants_daily_email': True}))
def create_new_user(user, access_token):
return db.user_info.update({'user.id_str': user['id_str']}, {'user':user, 'access_token':access_token, 'email_address':'', 'role':''}, upsert=True)
def save_user(user):
return db.user_info.update({'user.id_str': user['user']['id_str']}, user)
def get_user_count():
return db.user_info.count()
def add_tags_to_user(screen_name, tags=[]):
return db.user_info.update({'user.screen_name':screen_name}, {'$addToSet':{'tags':{'$each':tags}}})
###########################
### SCRIPT FUNCTIONS
###########################
''' Updates twitter account of id id_str, or else updates all twitter accounts.
Updating all accounts will probably cause API to puke from too many requests '''
def update_twitter(id_str=None, api=None):
if not api:
consumer_key = settings.get('twitter_consumer_key')
consumer_secret = settings.get('twitter_consumer_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, secure=True)
api = tweepy.API(auth)
if id_str:
users = [get_user_by_id_str(id_str)]
else:
users = get_all()
for user in users:
id_str = user['user']['id_str']
twitter_user = api.get_user(id=id_str)
if id_str != twitter_user.id_str:
raise Exception
user_data = {
'auth_type': 'twitter',
'id_str': twitter_user.id_str,
'username': twitter_user.screen_name,
'fullname': twitter_user.name,
'screen_name': twitter_user.screen_name,
'profile_image_url': twitter_user.profile_image_url,
'profile_image_url_https': twitter_user.profile_image_url_https,
}
updated_user = {'access_token': user['access_token'], 'user': user_data}
save_user(updated_user)
print "++ Updated user @%s" % user['user']['username']
user_posts = postsdb.get_posts_by_screen_name(twitter_user.screen_name, per_page=100, page=1)
for p in user_posts:
p['user'] = user_data
postsdb.save_post(p)
print "++++ Updated %s info for %s" % (p['user']['screen_name'], p['title'])
''' Only updates a user if their twitter profile image URL returns a 404 '''
def update_twitter_profile_images():
consumer_key = settings.get('twitter_consumer_key')
consumer_secret = settings.get('twitter_consumer_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, secure=True)
api = tweepy.API(auth)
for user in get_all():
print "Checking user %s" % user['user']['screen_name']
try:
response = urllib2.urlopen(user['user']['profile_image_url_https'])
except urllib2.HTTPError, e:
if e.code == 404:
update_twitter(id_str=user['user']['id_str'], api=api)
''' Updates all account info from twitter, e.g. profile pics.
This currently times out because it makes too many API calls '''
'''
def update_twitter_all():
consumer_key = settings.get('twitter_consumer_key')
consumer_secret = settings.get('twitter_consumer_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, secure=True)
api = tweepy.API(auth)
for user in get_all():
id_str = user['user']['id_str']
twitter_user = api.get_user(id=id_str)
if id_str != twitter_user.id_str:
raise Exception
user_data = {
'auth_type': 'twitter',
'id_str': twitter_user.id_str,
'username': twitter_user.screen_name,
'fullname': twitter_user.name,
'screen_name': twitter_user.screen_name,
'profile_image_url': twitter_user.profile_image_url,
'profile_image_url_https': twitter_user.profile_image_url_https,
}
updated_user = {'access_token': user['access_token'], 'user': user_data}
save_user(updated_user)
print "Updated user @%s" % user['user']['username']
''' | gpl-3.0 |
sadleader/odoo | addons/l10n_uy/__init__.py | 438 | 1070 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <openerp.uy@lists.launchpad.net>
#    OpenERP localization project for Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
piyushroshan/xen-4.3.2 | tools/xm-test/tests/vcpu-pin/01_vcpu-pin_basic_pos.py | 42 | 1246 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Dan Smith <danms@us.ibm.com>
# 1) Make sure we have a multi cpu system
# 2) Create a test domain and pin its VCPU0 to CPU 0 and then 1
import sys
import re
from XmTestLib import *
# Verify that we can run this test on this host
if smpConcurrencyLevel() <= 1:
print "*** NOTE: This machine does not have more than one physical"
print " or logical cpu. The vcpu-pin test cannot be run!"
SKIP("Host not capable of running test")
domain = XmTestDomain()
try:
domain.start(noConsole=True)
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
status, output = traceCommand("xm vcpu-pin %s 0 0" % domain.getName())
if status != 0:
FAIL("xm vcpu-pin returned invalid %i != 0" % status)
cpu = getVcpuInfo(domain.getName())[0]
if cpu != 0:
FAIL("failed to switch VCPU 0 to CPU 0")
status, output = traceCommand("xm vcpu-pin %s 0 1" % domain.getName())
if status != 0:
FAIL("xm vcpu-pin returned invalid %i != 0" % status)
cpu = getVcpuInfo(domain.getName())[0]
if cpu != 1:
FAIL("failed to switch VCPU 0 to CPU 1")
domain.stop()
| gpl-2.0 |
shssoichiro/servo | tests/wpt/harness/wptrunner/wpttest.py | 58 | 10090 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
DEFAULT_TIMEOUT = 10 # seconds
LONG_TIMEOUT = 60 # seconds
import os
import mozinfo
from wptmanifest.parser import atoms
atom_reset = atoms["Reset"]
enabled_tests = set(["testharness", "reftest", "wdspec"])
class Result(object):
def __init__(self, status, message, expected=None, extra=None):
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.expected = expected
self.extra = extra
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
class SubtestResult(object):
def __init__(self, name, status, message, stack=None, expected=None):
self.name = name
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.stack = stack
self.expected = expected
def __repr__(self):
return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
class TestharnessResult(Result):
default_expected = "OK"
statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class TestharnessSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "TIMEOUT", "NOTRUN"])
class ReftestResult(Result):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class WdspecResult(Result):
default_expected = "OK"
statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class WdspecSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "ERROR"])
def get_run_info(metadata_root, product, **kwargs):
return RunInfo(metadata_root, product, **kwargs)
class RunInfo(dict):
def __init__(self, metadata_root, product, debug, extras=None):
self._update_mozinfo(metadata_root)
self.update(mozinfo.info)
self["product"] = product
if debug is not None:
self["debug"] = debug
elif "debug" not in self:
# Default to release
self["debug"] = False
if extras is not None:
self.update(extras)
def _update_mozinfo(self, metadata_root):
"""Add extra build information from a mozinfo.json file in a parent
directory"""
path = metadata_root
dirs = set()
while path != os.path.expanduser('~'):
if path in dirs:
break
dirs.add(str(path))
path = os.path.split(path)[0]
mozinfo.find_and_update_from_json(*dirs)
class Test(object):
result_cls = None
subtest_result_cls = None
test_type = None
def __init__(self, url, inherit_metadata, test_metadata, timeout=DEFAULT_TIMEOUT, path=None,
protocol="http"):
self.url = url
self._inherit_metadata = inherit_metadata
self._test_metadata = test_metadata
self.timeout = timeout
self.path = path
self.environment = {"protocol": protocol, "prefs": self.prefs}
def __eq__(self, other):
return self.id == other.id
@classmethod
def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
timeout = LONG_TIMEOUT if manifest_item.timeout == "long" else DEFAULT_TIMEOUT
return cls(manifest_item.url,
inherit_metadata,
test_metadata,
timeout=timeout,
path=manifest_item.source_file.path,
protocol="https" if hasattr(manifest_item, "https") and manifest_item.https else "http")
@property
def id(self):
return self.url
@property
def keys(self):
return tuple()
def _get_metadata(self, subtest=None):
if self._test_metadata is not None and subtest is not None:
return self._test_metadata.get_subtest(subtest)
else:
return self._test_metadata
def itermeta(self, subtest=None):
for metadata in self._inherit_metadata:
yield metadata
if self._test_metadata is not None:
yield self._get_metadata()
if subtest is not None:
subtest_meta = self._get_metadata(subtest)
if subtest_meta is not None:
yield subtest_meta
def disabled(self, subtest=None):
for meta in self.itermeta(subtest):
disabled = meta.disabled
if disabled is not None:
return disabled
return None
@property
def restart_after(self):
for meta in self.itermeta(None):
restart_after = meta.restart_after
if restart_after is not None:
return True
return False
@property
def tags(self):
tags = set()
for meta in self.itermeta():
meta_tags = meta.tags
if atom_reset in meta_tags:
tags = meta_tags.copy()
tags.remove(atom_reset)
else:
tags |= meta_tags
tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
return tags
@property
def prefs(self):
prefs = {}
for meta in self.itermeta():
meta_prefs = meta.prefs
if atom_reset in meta_prefs:
prefs = meta_prefs.copy()
del prefs[atom_reset]
else:
prefs.update(meta_prefs)
return prefs
def expected(self, subtest=None):
if subtest is None:
default = self.result_cls.default_expected
else:
default = self.subtest_result_cls.default_expected
metadata = self._get_metadata(subtest)
if metadata is None:
return default
try:
return metadata.get("expected")
except KeyError:
return default
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
class TestharnessTest(Test):
result_cls = TestharnessResult
subtest_result_cls = TestharnessSubtestResult
test_type = "testharness"
@property
def id(self):
return self.url
class ManualTest(Test):
test_type = "manual"
@property
def id(self):
return self.url
class ReftestTest(Test):
result_cls = ReftestResult
test_type = "reftest"
def __init__(self, url, inherit_metadata, test_metadata, references,
timeout=DEFAULT_TIMEOUT, path=None, viewport_size=None,
dpi=None, protocol="http"):
Test.__init__(self, url, inherit_metadata, test_metadata, timeout, path, protocol)
for _, ref_type in references:
if ref_type not in ("==", "!="):
raise ValueError("Unsupported reference type %s" % ref_type)
self.references = references
self.viewport_size = viewport_size
self.dpi = dpi
@classmethod
def from_manifest(cls,
manifest_test,
inherit_metadata,
test_metadata,
nodes=None,
references_seen=None):
timeout = LONG_TIMEOUT if manifest_test.timeout == "long" else DEFAULT_TIMEOUT
if nodes is None:
nodes = {}
if references_seen is None:
references_seen = set()
url = manifest_test.url
node = cls(manifest_test.url,
inherit_metadata,
test_metadata,
[],
timeout=timeout,
path=manifest_test.path,
viewport_size=manifest_test.viewport_size,
dpi=manifest_test.dpi,
protocol="https" if hasattr(manifest_test, "https") and manifest_test.https else "http")
nodes[url] = node
for ref_url, ref_type in manifest_test.references:
comparison_key = (ref_type,) + tuple(sorted([url, ref_url]))
if ref_url in nodes:
manifest_node = ref_url
if comparison_key in references_seen:
# We have reached a cycle so stop here
# Note that just seeing a node for the second time is not
# enough to detect a cycle because
# A != B != C != A must include C != A
# but A == B == A should not include the redundant B == A.
continue
references_seen.add(comparison_key)
manifest_node = manifest_test.manifest.get_reference(ref_url)
if manifest_node:
reference = ReftestTest.from_manifest(manifest_node,
[],
None,
nodes,
references_seen)
else:
reference = ReftestTest(ref_url, [], None, [])
node.references.append((reference, ref_type))
return node
@property
def id(self):
return self.url
@property
def keys(self):
return ("reftype", "refurl")
class WdspecTest(Test):
result_cls = WdspecResult
subtest_result_cls = WdspecSubtestResult
test_type = "wdspec"
manifest_test_cls = {"reftest": ReftestTest,
"testharness": TestharnessTest,
"manual": ManualTest,
"wdspec": WdspecTest}
def from_manifest(manifest_test, inherit_metadata, test_metadata):
test_cls = manifest_test_cls[manifest_test.item_type]
return test_cls.from_manifest(manifest_test, inherit_metadata, test_metadata)
| mpl-2.0 |
vityagi/azure-linux-extensions | OSPatching/patch/__init__.py | 16 | 2381 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import re
import platform
from UbuntuPatching import UbuntuPatching
from redhatPatching import redhatPatching
from centosPatching import centosPatching
from OraclePatching import OraclePatching
from SuSEPatching import SuSEPatching
# Define the function in case waagent(<2.0.4) doesn't have DistInfo()
def DistInfo(fullname=0):
if 'FreeBSD' in platform.system():
release = re.sub('\-.*\Z', '', str(platform.release()))
distinfo = ['FreeBSD', release]
return distinfo
if os.path.isfile('/etc/oracle-release'):
release = re.sub('\-.*\Z', '', str(platform.release()))
distinfo = ['Oracle', release]
return distinfo
if 'linux_distribution' in dir(platform):
distinfo = list(platform.linux_distribution(\
full_distribution_name=fullname))
# remove trailing whitespace in distro name
distinfo[0] = distinfo[0].strip()
return distinfo
else:
return platform.dist()
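# Illustrative sketch (assumption, not guaranteed output): on a stock
# Ubuntu 14.04 host, DistInfo() would return roughly ['Ubuntu', '14.04', 'trusty'],
# while DistInfo(fullname=1) keeps the full distribution name.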
def GetMyPatching(hutil, patching_class_name=''):
"""
Return MyPatching object.
NOTE: Logging is not initialized at this point.
"""
if patching_class_name == '':
if 'Linux' in platform.system():
Distro = DistInfo()[0]
else: # I know this is not Linux!
if 'FreeBSD' in platform.system():
Distro = platform.system()
Distro = Distro.strip('"')
Distro = Distro.strip(' ')
patching_class_name = Distro + 'Patching'
else:
Distro = patching_class_name
if not globals().has_key(patching_class_name):
hutil.log_and_syslog(Distro + ' is not a supported distribution.')
return None
return globals()[patching_class_name](hutil)
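# Illustrative usage sketch (not in the original file; `hutil` is the handler
# utility object the extension entry point receives from waagent and is
# treated as opaque here):
#
# patching = GetMyPatching(hutil)
# if patching is None:
#     raise RuntimeError("unsupported distribution")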
| apache-2.0 |
fabianp/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
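# Illustrative sketch (not in the original file): the 'sequences' format turns
# a dense indicator matrix into per-sample label lists, e.g.
#   FORMATS['sequences'](np.array([[1, 0, 1], [0, 1, 0]]))  # -> [[0, 2], [1]]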
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into a multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
jvzantvoort/RackTablesDB | RackTablesDB/config.py | 1 | 2308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""RackTablesDB.config - read the configuration for the RackTablesDB"""
import os
import logging
import ConfigParser
import traceback
class Config(object):
def __init__(self):
self.variables = dict()
self.database = dict()
self.has_been_read = False
self.configfiles = list()
self.configfiles.append(os.path.expanduser('~/.RackTablesDB.cfg'))
# NOTE: add smart extras
def read(self):
parser = ConfigParser.ConfigParser()
for configfile in self.configfiles:
if not os.path.exists(configfile):
logging.info('file {0} does not exist'.format(configfile))
continue
try:
parser.read(configfile)
except Exception:
# NOTE: broad catch so one unreadable config file does not abort the rest
logging.error(traceback.format_exc())
if parser.has_section('database'):
for option in parser.options('database'):
self.database[option] = parser.get('database', option)
self.has_been_read = True
@property
def user(self):
"""return database user"""
if 'user' in self.database:
return self.database['user']
if self.has_been_read:
raise KeyError('cannot access value: user')
self.read()
return self.user
@property
def password(self):
"""return database password"""
if 'password' in self.database:
return self.database['password']
if self.has_been_read:
raise KeyError('cannot access value: password')
self.read()
return self.password
@property
def name(self):
"""return database name"""
if 'name' in self.database:
return self.database['name']
if self.has_been_read:
raise KeyError('cannot access value: name')
self.read()
return self.name
@property
def host(self):
"""return database host"""
if 'host' in self.database:
return self.database['host']
if self.has_been_read:
# default to localhost
self.database['host'] = 'localhost'
self.read()
return self.host
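# Illustrative usage sketch (not in the original file; assumes
# ~/.RackTablesDB.cfg exists and contains a [database] section):
#
#   [database]
#   user = racktables
#   password = secret
#   name = racktables
#
# cfg = Config()
# print cfg.user, cfg.name, cfg.host  # host falls back to 'localhost'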
| mit |
neilhan/tensorflow | tensorflow/python/ops/control_flow_ops.py | 3 | 114338 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Control Flow Operations
TensorFlow provides several operations and classes that you can use to control
the execution of operations and add conditional dependencies to your graph.
@@identity
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
@@case
@@while_loop
## Logical Operators
TensorFlow provides several operations that you can use to add logical operators
to your graph.
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
## Comparison Operators
TensorFlow provides several operations that you can use to add comparison
operators to your graph.
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@select
@@where
## Debugging Operations
TensorFlow provides several operations that you can use to validate values and
debug your graph.
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import control_flow_pb2
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
NOTE: To ensure that Assert executes, one usually attaches a dependency:
```python
# Ensure maximum element of x is smaller or equal to 1
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
x = tf.with_dependencies([assert_op], x)
```
Args:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
summarize: Print this many entries of each tensor.
name: A name for this operation (optional).
Returns:
assert_op: An `Operation` that, when executed, raises a
`tf.errors.InvalidArgumentError` if `condition` is not true.
"""
with ops.name_scope(name, "Assert", [condition, data]) as name:
condition = ops.convert_to_tensor(condition, name="Condition")
def true_assert():
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
# TODO(ebrevdo): Remove the cond once when can tell all inputs are on host.
guarded_assert = cond(
condition, no_op, true_assert, name="AssertGuard")
return guarded_assert.op
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype.is_ref_dtype:
return gen_array_ops._ref_identity(data, name=name)
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = array_ops.identity(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.shape, name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype.is_ref_dtype:
return ref_next_iteration(data, name=name)
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = next_iteration(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.shape, name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
use_ref=True, use_input_shape=True, name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `data` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
use_ref: If true, use ref_enter if data is of ref type.
use_input_shape: If true, keep the input shape on the output of the Enter op.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype.is_ref_dtype and use_ref:
result = ref_enter(data, frame_name, is_constant, parallel_iterations,
name=name)
else:
result = enter(data, frame_name, is_constant, parallel_iterations,
name=name)
if use_input_shape:
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(data.values, frame_name, is_constant,
parallel_iterations=parallel_iterations,
use_input_shape=use_input_shape, name=name)
indices = enter(data.indices, frame_name, is_constant,
parallel_iterations, name="indices")
if use_input_shape:
indices.set_shape(data.indices.get_shape())
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = enter(dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = enter(data.shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.shape.get_shape())
return ops.SparseTensor(indices, values, dense_shape)
def exit(data, name=None):
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Args:
data: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype.is_ref_dtype:
return gen_control_flow_ops._ref_exit(data, name)
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops._exit(dense_shape, name)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.shape, name)
return ops.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
"""Forwards `data` to an output determined by `pred`.
If `pred` is true, the `data` input is forwarded to `output_true`;
otherwise it goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
dtype: Optional element type for the returned tensor. If missing,
the type is inferred from the type of `data`.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded
to `output_true`, otherwise it goes to `output_false`.
"""
with ops.name_scope(name, "Switch", [data, pred]) as name:
data = ops.convert_to_tensor_or_indexed_slices(data, dtype=dtype,
name="data", as_ref=True)
pred = ops.convert_to_tensor(pred, name="pred")
if isinstance(data, ops.Tensor):
return gen_control_flow_ops._switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
dense_shape, pred, name="dense_shape")
else:
dense_shape_f, dense_shape_t = None, None
return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
ops.IndexedSlices(val_t, ind_t, dense_shape_t))
else:
dense_shape = data.shape
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
data.shape, pred, name="dense_shape")
return (ops.SparseTensor(ind_f, val_f, dense_shape_f),
ops.SparseTensor(ind_t, val_t, dense_shape_t))
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
If `pred` is true, the `data` input is forwarded to `output_true`;
otherwise it goes to `output_false`.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded to
`output_true`, otherwise it goes to `output_false`.
Raises:
TypeError: if data is not a Tensor or IndexedSlices
"""
data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
# NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
# addresses the following scenario.
#
# Assume you execute Optimizer.apply_gradients() in a branch of a cond().
#
# 1. The update op is created inside a `with ops.colocate(var):` block
#
# 2. Some tensor `data` is captured and a switch is created in a
# `with ops.colocate_with(data):` block.
#
# with ops.colocate_with(var):
# with ops.colocate_with(data):
# op = ...
#
# var and data may be pinned to different devices, so we want to ops
# created within ops.colocate_with(data) to ignore the existing stack.
with ops.colocate_with(data, ignore_existing=True):
if isinstance(data, ops.Tensor):
if data.dtype.is_ref_dtype:
return ref_switch(data, pred, name=name)
return switch(data, pred, name=name)
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
This op handles both `Tensor`s and `IndexedSlices`. If `inputs` contains a mix
of `Tensor`s and `IndexedSlices`, all inputs are converted to `IndexedSlices`
before merging.
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If any of the inputs is None, or inputs are IndexedSlices and
some but not all have a dense_shape property.
"""
if any([inp is None for inp in inputs]):
raise ValueError("At least one of the merge inputs is None: %s" % inputs)
with ops.name_scope(name, "Merge", inputs) as name:
inputs = [ops.convert_to_tensor_or_indexed_slices(inp, as_ref=True)
for inp in inputs]
if all([isinstance(v, ops.Tensor) for v in inputs]):
if all([v.dtype.is_ref_dtype for v in inputs]):
return gen_control_flow_ops._ref_merge(inputs, name)
else:
return gen_control_flow_ops._merge(inputs, name)
elif all([isinstance(v, ops.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.shape for inp in inputs], name="dense_shape")
return ops.SparseTensor(indices, values, dense_shape), chosen_index
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape is not None for inp in inputs):
if any(inp.dense_shape is None for inp in inputs):
raise ValueError("Either all merged IndexedSlices must have a "
"dense_shape, or none must have a dense_shape.")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
else:
dense_shape = None
return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
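# Illustrative sketch (not in the original file): switch and merge are duals,
# and a simple conditional can be built from them, roughly what cond() does
# internally in simplified form:
#
#   out_false, out_true = switch(data, pred)
#   true_branch = math_ops.square(out_true)   # runs only when pred is True
#   false_branch = math_ops.neg(out_false)    # runs only when pred is False
#   result, chosen_index = merge([false_branch, true_branch])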
def _convert_tensorarrays_to_flows(tensors_or_tensor_arrays):
return [ta.flow if isinstance(ta, tensor_array_ops.TensorArray)
else ta
for ta in tensors_or_tensor_arrays]
def _make_tensor_array(ta, t_or_flow):
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype, handle=ta.handle, flow=t_or_flow,
infer_shape=ta._infer_shape)
new_ta._elem_shape = ta._elem_shape
return new_ta
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d"
% (len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
_make_tensor_array(ta, t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray)
else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)]
def _IsLoopConstantEnter(op):
"""Return true iff op is a loop invariant."""
is_enter = (op.type == "Enter" or op.type == "RefEnter")
return is_enter and op.get_attr("is_constant")
def _GetLoopConstantEnter(value):
"""Return the enter op if we can infer `value` to be a loop invariant."""
id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
op = value.op
while op.type in id_ops:
op = op.inputs[0].op
return op if _IsLoopConstantEnter(op) else None
def _GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context()
if IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
def _ShapeLessThanOrEqual(shape1, shape2):
if shape2.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim2.value is not None and dim1.value != dim2.value:
return False
return True
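# Illustrative sketch (not in the original file): "less than or equal" here
# means "at least as specific as the invariant":
#   _ShapeLessThanOrEqual(tensor_shape.TensorShape([2, 3]),
#                         tensor_shape.TensorShape([2, None]))  # True
#   _ShapeLessThanOrEqual(tensor_shape.TensorShape([None, 3]),
#                         tensor_shape.TensorShape([2, 3]))     # False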
def _SetShapeInvariants(input_vars, enter_vars, shapes):
"""Set the shapes of the tensors in `enter_vars` to `shapes`.
Args:
input_vars: A list of tensors that are inputs to `enter_vars`.
enter_vars: A list of tensors whose shapes will be set.
shapes: A (possibly nested) list of shapes.
Raises:
ValueError: If any tensor in `enter_vars` has a less specific shape
than its corresponding shape in `shapes`.
"""
if shapes is None:
return
flat_shapes = nest.flatten(shapes)
if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
# Check that the shapes of the inputs are less than the shape invariants,
# and set the shapes of `enter_vars` to the shape invariants.
for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):
if isinstance(var, ops.Tensor):
if not _ShapeLessThanOrEqual(inp.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the loop variable. It enters the loop "
"with shape %s, but the specified shape invariant is %s."
% (inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the values tensor of this IndexedSlices. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.values.name, inp.values.get_shape(), shape))
var.values.set_shape(shape)
var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))
if var.dense_shape is not None:
var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))
else:
if not _ShapeLessThanOrEqual(inp.shape.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the shape tensor of this SparseTensor. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.shape.name, inp.shape.get_shape(), shape))
var.values.set_shape(tensor_shape.TensorShape([None]))
var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))
var.shape.set_shape(shape)
def _EnforceShapeInvariant(merge_var, next_var):
"""Check if the shapes of the loops variables are invariants.
Args:
merge_vars: The list of tensors representing the initial values of the
loop variables.
next_vars: The list of tensors representing the values of the loop
variables after one loop iteration.
Raises:
ValueError: If any tensor in `merge_vars` has a more specific shape than
its correspnding tensor in `next_var`.
"""
if isinstance(merge_var, ops.Tensor):
m_shape = merge_var.get_shape()
n_shape = next_var.get_shape()
if not _ShapeLessThanOrEqual(n_shape, m_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape %s, but has shape %s after one iteration. "
"Provide shape invariants using either the `shape_invariants` "
"argument of tf.while_loop or set_shape() on the loop variables."
% (merge_var.name, m_shape, n_shape))
else:
if not isinstance(merge_var, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(merge_var))
if isinstance(merge_var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = tensor_shape.TensorShape(None)
if merge_var.dense_shape is not None:
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = tensor_shape.TensorShape(None)
if next_var.dense_shape is not None:
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either the "
"`shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
else:
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = merge_var.shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = next_var.shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or
not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either "
"the `shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
def _AddNextAndBackEdge(m, v):
"""Add NextIteration and back edge from v to m."""
if isinstance(m, ops.Tensor):
v = ops.convert_to_tensor(v)
v = _NextIteration(v)
m.op._update_input(1, v) # pylint: disable=protected-access
elif isinstance(m, ops.IndexedSlices):
# pylint: disable=protected-access
v = math_ops._as_indexed_slices(v, optimize=False)
v = _NextIteration(v)
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
# pylint: enable=protected-access
if m.dense_shape is not None:
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, ops.SparseTensor):
if not isinstance(v, ops.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
m.shape.op._update_input(1, v.shape)
# pylint: enable=protected-access
else:
raise TypeError("Type %s not supported" % type(m))
return v
class GradLoopState(object):
"""The state used for constructing the gradient graph for a while loop.
We create a GradLoopState for each while loop in forward and its
corresponding while loop in backprop. This gives us access to both
the forward and the backprop WhileContexts.
During the construction of the gradient graph, whenever we detect
a forward value that is needed for backprop, we create a history
accumulator and add it to `history_map`. Whenever we backprop
a loop switch op (in _SwitchGrad), we add the grad merge op to
`switch_map`.
"""
def __init__(self, forward_ctxt, outer_grad_state):
# The grad loop state for the outer while loop.
self._outer_grad_state = None
# The while loop context for forward.
self._forward_context = None
# The loop counter added by AddForwardLoopCounter. It is the value
# of the loop counter for the next iteration.
self._forward_index = None
# A sync op for forward.
self._forward_sync = None
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackPropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
# A sync op for backprop.
self._grad_sync = None
# Information needed by backprop.
self._history_map = {}
self._switch_map = {}
self._unused_exits = []
self._deferred_exits = []
self._pending_exits_count = len(forward_ctxt.loop_exits)
self._outer_grad_state = outer_grad_state
if outer_grad_state:
outer_forward_ctxt = outer_grad_state.forward_context
else:
outer_forward_ctxt = forward_ctxt.outer_context
# Add the forward loop counter.
if outer_forward_ctxt: outer_forward_ctxt.Enter()
cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
self._forward_context = forward_ctxt
self._forward_index = forward_index
# Add the backprop WhileContext, and the backprop loop counter.
if outer_grad_state:
# This is a nested loop. Remember the iteration counts for each
# execution of this inner loop.
outer_forward_ctxt.AddName(cnt.name)
history_cnt = outer_grad_state.AddForwardAccumulator(cnt)
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
real_cnt = outer_grad_state.AddBackPropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
if outer_forward_ctxt: outer_forward_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
@property
def outer_grad_state(self):
"""The grad loop state for outer loop."""
return self._outer_grad_state
@property
def forward_context(self):
"""The while loop context for forward."""
return self._forward_context
@property
def forward_index(self):
"""The loop index of forward loop."""
return self._forward_index
@property
def forward_sync(self):
"""A control trigger node for synchronization in the forward loop.
One main use is to keep the push ops of a stack executed in the
iteration order.
"""
if self._forward_sync is None:
with ops.control_dependencies(None):
self._forward_sync = control_trigger(name="f_sync")
self._forward_sync._set_control_flow_context(self._forward_context)
self._forward_index.op._add_control_input(self._forward_sync)
return self._forward_sync
@property
def grad_context(self):
"""The corresponding WhileContext for gradient."""
return self._grad_context
@property
def grad_index(self):
"""The loop index of backprop loop."""
return self._grad_index
@property
def grad_sync(self):
"""A control trigger node for synchronization in the grad loop.
One main use is to keep the pop ops of a stack executed in the
iteration order.
"""
if self._grad_sync is None:
with ops.control_dependencies(None):
self._grad_sync = control_trigger(name="b_sync")
self._grad_sync._set_control_flow_context(self._grad_context)
self._grad_index.op._add_control_input(self._grad_sync)
return self._grad_sync
@property
def history_map(self):
"""The map that records all the tensors needed for backprop."""
return self._history_map
@property
def switch_map(self):
"""The map that records all the Switch ops for the while loop."""
return self._switch_map
@property
def unused_exits(self):
"""The list of "unused" exits."""
return self._unused_exits
@property
def deferred_exits(self):
"""The list of "deferred" exits."""
return self._deferred_exits
@property
def pending_exits_count(self):
"""The number of exits we expect to see but haven't."""
return self._pending_exits_count
@pending_exits_count.setter
def pending_exits_count(self, cnt):
"""Set the pending count to cnt."""
self._pending_exits_count = cnt
def AddForwardAccumulator(self, value, dead_branch=False):
"""Add an accumulator for each forward tensor that is needed in backprop.
This is added to the forward loop at the first time when a tensor
in the forward loop is used by backprop gradient computation loop.
We create an accumulator that accumulates the value of tensor at each
iteration. Called in the control flow context where gradients() is called.
The pseudocode is:
```
acc = stack();
while (_pivot) {
acc = stack_push(acc, value);
}
```
We make sure that the stack push op in one iteration is executed before the
next iteration. This is achieved by adding a control edge from
`forward_index.op.inputs[0].op` to the push op, and another control
edge from the push op to either `forward_index.op` or `forward_sync`.
Args:
value: The source tensor in forward that is to be accumulated.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The stack that contains the accumulated history of the tensor.
Raises:
TypeError: For internal errors involving the value condition context.
"""
curr_ctxt = ops.get_default_graph()._get_control_flow_context()
with ops.control_dependencies(None):
if curr_ctxt: curr_ctxt.Enter()
with ops.colocate_with(value):
# pylint: disable=protected-access
acc = gen_data_flow_ops._stack(value.dtype.base_dtype, name="f_acc")
# pylint: enable=protected-access
if curr_ctxt: curr_ctxt.Exit()
# Make acc available in the forward context.
enter_acc = self.forward_context.AddValue(acc)
# Add the stack_push op in the context of value.op.
swap_enabled = self.forward_context.swap_memory
value_ctxt = _GetOutputContext(value.op)
if value_ctxt == self.forward_context:
# value is not nested in the forward context.
self.forward_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
self.forward_context.Exit()
# Protect stack push and order it before forward_index.
self.forward_index.op._add_control_input(push.op)
else:
# value is in a cond context within the forward context.
if not isinstance(value_ctxt, CondContext):
raise TypeError(
"value_ctxt is not a CondContext: %s" % value_ctxt)
if dead_branch:
# The special case for creating a zero tensor for a dead
# branch of a switch. See ControlFlowState.ZerosLike().
value_ctxt.outer_context.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.outer_context.Exit()
push.op._set_control_flow_context(value_ctxt)
else:
value_ctxt.Enter()
push = gen_data_flow_ops._stack_push(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.Exit()
# Protect stack push and order it before forward_sync.
self.forward_sync._add_control_input(push.op)
# Order stack push after the successor of forward_index
add_op = self.forward_index.op.inputs[0].op
push.op._add_control_input(add_op)
return acc
def AddBackPropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
This is added to the backprop loop. Called in the grad context to
get the value of an accumulated value. The stack pop op must be guarded
by the pred of the controlling cond.
Args:
history_value: The history (a stack) of a value.
value: The value that is pushed onto the stack.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The current value (the top of the stack).
"""
history_ctxt = history_value.op._get_control_flow_context()
# Find the cond context that controls history_value if any.
cond_ctxt = None
value_ctxt = value.op._get_control_flow_context()
while value_ctxt and value_ctxt != history_ctxt:
if isinstance(value_ctxt, CondContext):
cond_ctxt = value_ctxt
break
value_ctxt = value_ctxt.outer_context
with ops.control_dependencies(None):
self.grad_context.Enter()
if cond_ctxt:
# Guard stack pop with a switch if it is controlled by a cond.
grad_state = self
pred = None
while pred is None and grad_state:
pred = grad_state.history_map.get(cond_ctxt.pred.name)
grad_state = grad_state.outer_grad_state
if pred is None:
pred = cond_ctxt.pred
branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
history_value = _SwitchRefOrTensor(history_value, pred)[branch]
pop = gen_data_flow_ops._stack_pop(history_value, value.dtype.base_dtype)
pop.set_shape(value.get_shape())
self.grad_context.Exit()
parallel_iterations = self.grad_context.parallel_iterations
if parallel_iterations > 1:
# All pops are ordered after pivot_for_body and before grad_sync.
self.grad_sync._add_control_input(pop.op)
return pop
def GetRealValue(self, value):
"""Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history.
"""
assert value.op.type != "Variable"
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = _GetLoopConstantEnter(cur_value)
if enter_op:
# Special case: cur_value comes from a constant Enter node.
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
# We are now outside all nested loops for this gradient(),
# so `value` is a loop invariant and there is no need to
# save the history of value. Just make cur_value to enter
# the right control flow context.
real_value = self._grad_context.AddValue(cur_value)
break
else:
# Record the history of this value in forward_ctxt.
# TODO(yuanbyu): Avoid recording constants.
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackPropAccumulatedValue(history_value,
cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
"""Maintain the mapping from the loops to their grad states."""
def __init__(self):
self._map = {} # maps forward loop context to GradLoopState
def GetGradState(self, op, before):
"""Return the grad state for this op if it's in a forward loop context."""
if before and IsLoopExit(op):
forward_ctxt = op._get_control_flow_context()
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
else:
forward_ctxt = _GetWhileContext(op)
if forward_ctxt:
return self._map.get(forward_ctxt)
return None
def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
"""Process all the "unused" loop exits.
The "unused" exits of the loops are added to `unused_exits`. An exit is
unused if its pending_count is 0. If there is an exit with real gradient,
all these deferred exits will enter the backprop loop with zero gradient.
Otherwise, they will enter the backprop loop with None. As an example,
people often write:
```
v1, _ = tf.while_loop(p, b, [x1, x2])
result = gradients(v1, x1)
```
The exit node for x2 is not included by the betweenness analysis. But we
need to backprop x2 if x2 is involved in computing v1.
Args:
pending_count: The number of backprop inputs for every op.
to_ops_set: The set of ops for ys in gradients(ys, xs)
Returns:
The set of unused loop exits that we know at this point we need
to backprop.
"""
loop_exits = []
for forward_ctxt, grad_state in self._map.items():
for y in forward_ctxt.loop_exits:
# pylint: disable=protected-access
if pending_count[y.op._id] == 0:
grad_state.pending_exits_count -= 1
if y.op._id not in to_ops_set:
grad_state.unused_exits.append(y)
if grad_state.pending_exits_count == 0:
loop_exits.extend(grad_state.unused_exits)
# pylint: enable=protected-access
return loop_exits
def EnterGradWhileContext(self, op, before):
"""Enter the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Enter()
def ExitGradWhileContext(self, op, before):
"""Exit the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Exit()
def AddWhileContext(self, op, between_op_list, between_ops):
"""Add the grad state for the while loop that op belongs to.
Note that op is an Exit, and this method must be called in
the control flow context where gradients() is called.
Note that this method modifies `between_op_list` and `between_ops`.
"""
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# This is a new while loop so create a grad state for it.
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
grad_state = GradLoopState(forward_ctxt, outer_grad_state)
self._map[forward_ctxt] = grad_state
# We need to include all exits of a loop for backprop.
for loop_exit in forward_ctxt.loop_exits:
if not between_ops[loop_exit.op._id]:
between_ops[loop_exit.op._id] = True
between_op_list.append(loop_exit.op)
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
A zero tensor with the same shape as val.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
def ZerosLike(self, op, index):
"""Create zeros_like for the specified output of an op.
If op is in a while loop that is part of gradients(), this method
must be called in its grad loop context.
Args:
op: A tensorflow operation.
index: the index for a specific output of the op.
Returns:
A zero tensor with the same shape as op.outputs[index].
"""
if IsLoopSwitch(op): return None
dead_branch = IsSwitch(op)
forward_ctxt = _GetWhileContext(op)
if forward_ctxt is None:
# op is not in a while loop that is part of gradients().
return ZerosLikeOutsideLoop(op, index)
op_ctxt = op._get_control_flow_context()
grad_state = self._map.get(forward_ctxt)
val = ops.convert_to_tensor(op.outputs[index], name="tensor")
shape = val.get_shape()
if shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor with
# the right shape in the grad loop context.
result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
if dead_branch:
# op is a cond switch. Guard the zero tensor with a switch.
pred = grad_state.history_map.get(op_ctxt.pred.name)
branch = op_ctxt.branch
result = _SwitchRefOrTensor(result, pred)[1 - branch]
else:
# Unknown shape so keep a history of the shape at runtime.
if dead_branch:
# Need to add a special switch to guard the value.
pred = op_ctxt.pred
branch = op_ctxt.branch
op_ctxt.outer_context.Enter()
val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.outer_context.Exit()
val.op._set_control_flow_context(op_ctxt)
zeros_shape.op._set_control_flow_context(op_ctxt)
else:
op_ctxt.Enter()
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.Exit()
# Add forward accumulator for shape.
grad_state.grad_context.Exit()
history_zeros_shape = grad_state.AddForwardAccumulator(
zeros_shape, dead_branch=dead_branch)
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
def PostProcessing(self):
"""Perform postprocessing at the end of gradients().
We have created the gradient graph at this point. So this function
can be used to perform any postprocessing on the gradient graph.
We currently perform the following postprocessing:
1. Patch the gradient graph if the output of a loop variable
doesn't depend on its input.
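For example (a sketch): with a body like `lambda i, x: (i + 1, tf.ones([2]))`,
the next value of `x` ignores its previous value, so the gradient
w.r.t. `x` is patched to zeros for all iterations > 0.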
"""
for _, grad_state in self._map.items():
for _, b_merge in grad_state.switch_map.items():
if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
# The value of this loop variable at iteration i+1 doesn't
# depend on its value at iteration i. So use zeros as the
# gradients for all iterations > 0.
dtype = b_merge.op.inputs[0].dtype
shape = b_merge.op.inputs[0].get_shape()
# pylint: disable=protected-access
if shape.is_fully_defined():
grad_state.grad_context.Enter()
# Create a zeros and use it for iterations > 0.
grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
else:
# Create a zeros in the outer grad context.
outer_grad_ctxt = grad_state.grad_context.outer_context
if outer_grad_ctxt: outer_grad_ctxt.Enter()
enter_grad_op = b_merge.op.inputs[0].op
enter_grad = enter_grad_op.inputs[0]
grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
grad_val = array_ops.zeros(grad_shape)
if outer_grad_ctxt: outer_grad_ctxt.Exit()
# Use the zeros for iterations > 0.
grad_state.grad_context.Enter()
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
b_merge.op._update_input(1, next_grad_val)
# pylint: enable=protected-access
def MaybeCreateControlFlowState(between_op_list, between_ops,
colocate_gradients_with_ops):
"""Create the state for all the while loops involved in one gradients().
We create a ControlFlowState when there are while loops involved in
gradients(). In gradients(), control flow logic is only invoked when
the ControlFlowState is not None.
Note that this method modifies `between_op_list` and `between_ops`.
"""
loop_state = None
for op in between_op_list:
if IsLoopExit(op):
if loop_state is None:
loop_state = ControlFlowState()
if colocate_gradients_with_ops:
with ops.colocate_with(op):
loop_state.AddWhileContext(op, between_op_list, between_ops)
else:
loop_state.AddWhileContext(op, between_op_list, between_ops)
return loop_state
def IsSwitch(op):
"""Return true if `op` is a Switch."""
return op.type == "Switch" or op.type == "RefSwitch"
def IsLoopExit(op):
"""Return true if `op` is an Exit."""
return op.type == "Exit" or op.type == "RefExit"
def IsLoopSwitch(op):
"""Return true if `op` is the Switch for a while loop."""
if IsSwitch(op):
ctxt = op._get_control_flow_context()
return ctxt and isinstance(ctxt, WhileContext)
return False
def ZerosLikeOutsideLoop(op, index):
"""Create zeros_like for the specified output of an op."""
val = op.outputs[index]
if not IsSwitch(op):
return array_ops.zeros_like(val, optimize=False)
else:
op_ctxt = op._get_control_flow_context()
pred = op_ctxt.pred
branch = op_ctxt.branch
switch_val = switch(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
return array_ops.zeros(zeros_shape, dtype=val.dtype)
class ControlFlowContext(object):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
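An illustrative sketch of the usage pattern (internal API; assumes `pred`
and `pivot` tensors and a list `outputs` built inside the context):
```python
ctxt = CondContext(pred, pivot, branch=1)
ctxt.Enter()
# ... construct the ops belonging to this context ...
ctxt.Exit()
ctxt.ExitResult(outputs)  # make `outputs` visible to the outer context
```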
"""
def __init__(self, values_def=None):
self._outer_context = ops.get_default_graph()._get_control_flow_context()
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def)
else:
# Values that have been already seen in this context.
self._values = set()
# Values referenced by but external to this context.
self._external_values = {}
def _init_values_from_proto(self, values_def):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
self._external_values[k] = g.as_graph_element(v)
op_names = set([op.split(":")[0]
for op in self._values - set(self._external_values)])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(op)._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
def _to_proto(self):
"""Converts the values to a `ValuesDef` protocol buffer.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend([v for v in sorted(self._values)])
for k, v in self._external_values.items():
values_def.external_values[k] = v.name
return values_def
@staticmethod
def _from_proto(values_def):
"""Returns a `ControlFlowContext` created from `values_def`."""
return ControlFlowContext(values_def=values_def)
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
for x in result:
self._outer_context.AddName(x.name)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _IsInOuterContext(self, op):
op_ctxt = _GetOutputContext(op)
outer_ctxt = self.outer_context
while outer_ctxt != op_ctxt:
if outer_ctxt is None:
return False
outer_ctxt = outer_ctxt.outer_context
return True
def _MaybeAddToWhileContext(self, op):
"""Add a control dependency to the containing WhileContext.
The added control dependency ensures that the outputs of this op
belong to the WhileContext. Do nothing if the op is not contained
in a WhileContext.
Args:
op: An operation.
"""
while_ctxt = self.GetWhileContext()
if while_ctxt is not None:
op._add_control_input(while_ctxt.GetControlPivot().op)
def _MaybeRemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs = op.control_inputs
else:
internal_control_inputs = []
for x in op.control_inputs:
ctxt = _GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
if len(internal_control_inputs) != len(op.control_inputs):
del op.control_inputs[:]
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs
# pylint: enable=protected-access
class CondContext(ControlFlowContext):
"""The context for the conditional construct."""
def __init__(self, pred=None, pivot=None, branch=None,
name="cond_text", context_def=None):
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def)
else:
# Initializes the default fields.
ControlFlowContext.__init__(self)
self._pred = pred # The boolean tensor for the cond predicate
self._pivot = pivot # The predicate tensor in this branch
self._branch = branch # 0 or 1 representing this branch
# Values considered to have been already seen in this context.
self._values.add(pred.name)
self._values.add(pivot.name)
def _init_from_proto(self, context_def):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = context_def.context_name
self._pred = g.as_graph_element(context_def.pred_name)
self._pivot = g.as_graph_element(context_def.pivot_name)
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def)
@property
def name(self):
return self._name
@property
def pred(self):
return self._pred
@property
def pivot(self):
return self._pivot
@property
def branch(self):
return self._branch
@property
def grad_state(self):
if self.GetWhileContext():
return self.GetWhileContext().grad_state
return None
@property
def back_prop(self):
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def to_proto(self):
"""Converts a `CondContext` to a `CondContextDef` protocol buffer.
Returns:
A `CondContextDef` protocol buffer.
"""
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = self.name
context_def.pred_name = self._pred.name
context_def.pivot_name = self._pivot.name
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_proto())
return context_def
@staticmethod
def from_proto(context_def):
"""Returns a `CondContext` object created from `context_def`."""
return CondContext(context_def=context_def)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context. This is needed in
# particular for nested conds.
result = self._external_values.get(val.name)
result = val if result is None else result
else:
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
with ops.control_dependencies(None):
result = _SwitchRefOrTensor(result, self._pred)[self._branch]
# pylint: disable=protected-access
result.op._set_control_flow_context(self)
# pylint: enable=protected-access
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context."""
if not op.inputs:
# Remove any external control dependency on this op
self._MaybeRemoveExternalControlEdges(op)
# Add this op to the enclosing while context
self._MaybeAddToWhileContext(op)
# pylint: disable=protected-access
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
if x.name not in self._values:
self._values.add(x.name)
# Add this value to the parent contexts up to the context that
# creates this value.
real_x = x
if self._outer_context:
real_x = self._outer_context.AddValue(x)
self._values.add(real_x.name)
real_x = _SwitchRefOrTensor(real_x, self._pred)[self._branch]
self._external_values[x.name] = real_x
x = self._external_values.get(x.name)
if x is not None:
op._update_input(index, x)
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _ProcessOutputTensor(self, val):
"""Process an output tensor of a conditional branch."""
real_val = val
if val.name not in self._values:
# Handle the special case of lambda: x
self._values.add(val.name)
if self._outer_context:
real_val = self._outer_context.AddValue(val)
self._values.add(real_val.name)
real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
self._external_values[val.name] = real_val
else:
external_val = self._external_values.get(val.name)
if external_val is not None:
real_val = external_val
return real_val
def BuildCondBranch(self, fn):
"""Add the subgraph defined by fn() to the graph."""
r = fn()
original_r = r
result = []
if r is not None:
if not isinstance(r, list) and not isinstance(r, _basetuple):
r = [r]
original_r = [original_r]
r = _convert_tensorarrays_to_flows(r)
for v in r:
real_v = v
if isinstance(v, ops.Operation):
# Use pivot as the proxy for this op.
real_v = with_dependencies([v], self._pivot)
else:
if isinstance(v, (ops.IndexedSlices, ops.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
dense_shape = v.dense_shape
if dense_shape is not None:
dense_shape = self._ProcessOutputTensor(dense_shape)
real_v = ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.shape)
real_v = ops.SparseTensor(indices, values, dense_shape)
else:
real_v = self._ProcessOutputTensor(v)
result.append(real_v)
return original_r, result
def cond(pred, fn1, fn2, name=None):
"""Return either fn1() or fn2() based on the boolean predicate `pred`.
`fn1` and `fn2` both return lists of output tensors. `fn1` and `fn2` must have
the same non-zero number and type of outputs.
Note that the conditional execution applies only to the operations defined in
fn1 and fn2. Consider the following simple program:
```python
z = tf.mul(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
If x < y, the `tf.add` operation will be executed and the `tf.square`
operation will not be. Since z is needed for at least one
branch of the cond, the `tf.mul` operation is always executed,
unconditionally. Although this behavior is consistent with the dataflow
model of TensorFlow, it has occasionally surprised some users who
expected lazier semantics.
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `fn1` or `fn2`. If the callables
return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `fn1` or `fn2` is not callable.
ValueError: if `fn1` and `fn2` do not return the same number of tensors, or
return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.mul(x, 17)
def f2(): return tf.add(y, 23)
r = cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
with ops.name_scope(name, "cond", [pred]) as name:
if not callable(fn1):
raise TypeError("fn1 must be callable.")
if not callable(fn2):
raise TypeError("fn2 must be callable.")
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool")
p_2, p_1 = switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = CondContext(pred, pivot_1, branch=1)
context_t.Enter()
orig_res, res_t = context_t.BuildCondBranch(fn1)
context_t.ExitResult(res_t)
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = CondContext(pred, pivot_2, branch=0)
context_f.Enter()
_, res_f = context_f.BuildCondBranch(fn2)
context_f.ExitResult(res_f)
context_f.Exit()
# Add the final merge to the graph.
if len(res_t) != len(res_f):
raise ValueError("fn1 and fn2 must return the same number of results.")
if not res_t:
raise ValueError("fn1 and fn2 must return at least one result.")
for x, y in zip(res_f, res_t):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, ops.SparseTensor) and
isinstance(y, ops.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
raise ValueError("Outputs of fn1 and fn2 must have the same type: "
"%s, %s" % (val_x.dtype.name, val_y.dtype.name))
merges = [merge([x[0], x[1]])[0] for x in zip(res_f, res_t)]
merges = _convert_flows_to_tensorarrays(orig_res, merges)
# Add to collections
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
return merges[0] if len(merges) == 1 else merges
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self, parallel_iterations=10, back_prop=True, swap_memory=False,
name="while_context", grad_state=None, context_def=None):
if context_def:
self._init_from_proto(context_def)
else:
ControlFlowContext.__init__(self)
self._init_from_args(parallel_iterations, back_prop, swap_memory,
name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, parallel_iterations, back_prop, swap_memory,
name):
"""Creates a new `WhileContext` from arguments.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("`parallel_iterations` must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for the loop termination condition. Used in code
# generation for gradient computation.
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
def _init_from_proto(self, context_def):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = context_def.context_name
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(context_def.pivot_for_pred_name)
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(context_def.pivot_for_body_name)
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(context_def.pivot_name)
# The list of exit tensors for loop variables.
self._loop_exits = [g.as_graph_element(exit_name)
for exit_name in context_def.loop_exit_names]
super(WhileContext, self).__init__(values_def=context_def.values_def)
@property
def name(self):
return self._name
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Returns:
A `WhileContextDef` protocol buffer.
"""
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = self.name
context_def.parallel_iterations = self._parallel_iterations
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = self._pivot_for_pred.name
context_def.pivot_for_body_name = self._pivot_for_body.name
context_def.pivot_name = self._pivot.name
if self._loop_exits:
context_def.loop_exit_names.extend([l.name for l in self._loop_exits])
context_def.values_def.MergeFrom(super(WhileContext, self)._to_proto())
return context_def
@staticmethod
def from_proto(context_def):
"""Returns a `WhileContext` object created from `context_def`."""
return WhileContext(context_def=context_def)
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
if val.name not in self._values:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = _GetWhileContext(val.op)
if IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(result, self._name, is_constant=True,
parallel_iterations=self._parallel_iterations)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
if op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context.
In the case that op has only external data inputs, we remove all of its
external control inputs so all its inputs are in the same while loop
context. This is valid because op now has an Enter input that has all
the right control dependencies.
"""
if not op.inputs:
# Remove any external control dependency on this op
control_inputs = self._MaybeRemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
has_internal_data_input = False
for index in range(len(op.inputs)):
x = op.inputs[index]
self.AddValue(x)
real_x = self._external_values.get(x.name)
if real_x is not None:
op._update_input(index, real_x)
else:
has_internal_data_input = True
if not has_internal_data_input:
# Remove any external control dependency on this op
self._MaybeRemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _MaybeAddControlDependency(self, op):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
if op.control_inputs:
return False
for x in op.inputs:
if not _IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(n, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackPropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(count, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
cond = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(cond, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.sub(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackPropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context: self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
value = op.inputs[0]
if self.outer_context:
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
acc._shape = grad.get_shape() # pylint: disable=protected-access
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(acc, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
acc_result = exit(switch_acc_false, name="b_acc")
self.ExitResult([acc_result])
return acc_result
def AddBackPropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equivalent of AddBackPropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
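For example (illustrative; this path is exercised internally by
gradients(), assuming tensors `params` and `ids` defined outside the loop):
```python
# The gradient of tf.gather w.r.t. the loop-invariant `params` is an
# IndexedSlices, so it is accumulated with this method rather than
# with AddBackPropAccumulator.
b = lambda i, acc: (i + 1, acc + tf.reduce_sum(tf.gather(params, ids)))
```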
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context: self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(1)] + values.get_shape().dims[1:])
if self.outer_context: self.outer_context.Enter()
values_acc = constant_op.constant(0, values.dtype, shape=values_shape,
name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
values_shape = array_ops.concat(0, [[1], values_shape])
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context: self.outer_context.Enter()
shape_acc = constant_op.constant(0, dense_shape.dtype,
shape=dense_shape.get_shape())
if self.outer_context: self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(op.inputs[0], optimize=False),
optimize=False)
if self.outer_context: self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
enter_acc = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc") for x in init_acc]
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [array_ops.concat(0, [xa[1], xv])
for xa, xv in zip(switch_acc[:2], [indices, values])]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(
math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
acc_exits = [exit(x[0], name="b_acc") for x in switch_acc]
self.ExitResult(acc_exits)
return ops.IndexedSlices(
indices=acc_exits[0], values=acc_exits[1],
dense_shape=acc_exits[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, ops.Tensor):
self._values.add(x.name)
else:
self._values.add(x.values.name)
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, ops.SparseTensor):
dense_shape = x.shape
else:
raise TypeError("Type %s not supported" % type(x))
if dense_shape is not None:
self._values.add(dense_shape.name)
def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,
shape_invariants):
"""Core: Add the loop termination condition and body to the graph."""
flat_loop_vars = nest.flatten(original_loop_vars)
# Let the context know the loop variables so the loop variables
# would be added in the outer contexts properly.
self._InitializeValues(loop_vars)
real_vars = loop_vars
if self._outer_context:
real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
with ops.control_dependencies(None):
enter_vars = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=(shape_invariants is None))
for x in real_vars]
if self._outer_context:
control_pivot = self._outer_context.GetControlPivot().op
for var in enter_vars:
if _IsLoopConstantEnter(var.op.inputs[0].op):
# pylint: disable=protected-access
var.op._add_control_input(control_pivot)
# pylint: enable=protected-access
_SetShapeInvariants(real_vars, enter_vars, shape_invariants)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext(enter_vars)
self._InitializeValues(enter_vars)
merge_vars = [merge([x, x])[0] for x in enter_vars]
self._pivot_for_pred = merge_vars[0]
# Build the graph for pred.
merge_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))
packed_vars = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=merge_vars_with_tensor_arrays)
c = ops.convert_to_tensor(pred(*packed_vars))
self._pivot = loop_cond(c, name="LoopCond")
switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]
# Build the graph for body.
vars_for_body = [_Identity(x[1]) for x in switch_vars]
self._pivot_for_body = vars_for_body[0]
# Convert TensorArray flow variables inside the context back into
# their associated TensorArrays for calling the body.
vars_for_body_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))
packed_vars_for_body = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=vars_for_body_with_tensor_arrays)
body_result = body(*packed_vars_for_body)
if not nest.is_sequence(body_result):
body_result = [body_result]
# Compare the structure types of input and output of body.
# For backwards compatibility, the first layer is forced to a list
# during this comparison, because inputs are typically lists and
# outputs of the body are typically tuples.
nest.assert_same_structure(list(packed_vars_for_body), list(body_result))
# Store body_result to keep track of TensorArrays returned by body
original_body_result = body_result
# Convert TensorArrays returned by body into their flow variables
flat_result = nest.flatten(body_result)
result = _convert_tensorarrays_to_flows(flat_result)
result = ops.convert_n_to_tensor_or_indexed_slices(result)
# Add NextIteration and the back edges to complete the loop.
if len(merge_vars) != len(result):
raise ValueError("Number of inputs and outputs of body must match "
"loop_vars: %d, %d" % (len(merge_vars), len(result)))
next_vars = []
for m, v in zip(merge_vars, result):
next_vars.append(_AddNextAndBackEdge(m, v))
# Add the exit ops.
exit_vars = [exit(x[0]) for x in switch_vars]
self._loop_exits = exit_vars
# Make sure the shapes of loop outputs are correct.
for m_var, n_var in zip(merge_vars, next_vars):
if isinstance(m_var, ops.Tensor):
_EnforceShapeInvariant(m_var, n_var)
# Exit the loop.
self.ExitResult(exit_vars)
return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
original_loop_vars = loop_vars
flat_loop_vars = nest.flatten(loop_vars)
# Convert TensorArrays to their flow variables
loop_vars = _convert_tensorarrays_to_flows(flat_loop_vars)
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
self.Exit()
flat_result = nest.flatten(original_body_result)
# Convert TensorArray flow variables outside the context back into
# their associated TensorArrays for returning to caller.
exit_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_result, exit_vars))
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
return (packed_exit_vars[0] if len(exit_vars) == 1
else packed_exit_vars)
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
# pylint: disable=protected-access
for e in enters:
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, ops.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape if isinstance(e, ops.IndexedSlices) else e.shape
if shape is not None:
xs.append(shape)
for x in xs:
inp_op = x.op.inputs[0]
control_inputs = graph._control_dependencies_for_inputs([inp_op])
outer_control_inputs = [op for op in control_inputs
if self._IsInOuterContext(op)]
x.op._set_control_flow_context(self)
x.op._add_control_inputs(outer_control_inputs)
graph._record_op_seen_by_control_dependencies(x.op)
# pylint: enable=protected-access
def while_loop(cond, body, loop_vars, shape_invariants=None,
parallel_iterations=10, back_prop=True, swap_memory=False,
name=None):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
returning a (possibly nested) tuple or list of tensors of the same
arity (length and structure) and types as `loop_vars`. `loop_vars` is a
(possibly nested) tuple or list of tensors that is passed to both `cond`
and `body`. `cond` and `body` both take as many arguments as there are
`loop_vars`.
While `cond` evaluates to true, `body` is executed.
In addition to regular Tensors or IndexedSlices, the body may accept and
return TensorArray objects. The flows of the TensorArray objects will
be appropriately forwarded between loops and during gradient calculations.
For correctness, `tf.while_loop()` strictly enforces shape invariants for
the loop variables. A shape invariant is a (possibly partial) shape that
is unchanged across the iterations of the loop. An error will be raised
if the shape of a loop variable after an iteration is determined to be more
general than or incompatible with its shape invariant. For example, a shape
of [11, None] is more general than a shape of [11, 17], and [11, 21] is not
compatible with [11, 17]. By default (if the argument `shape_invariants` is
not specified), it is assumed that the initial shape of each tensor in
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
[`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape)
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariants for
SparseTensor and IndexedSlices are treated specially as follows:
a) If a loop variable is a SparseTensor, the shape invariant must be
TensorShape([r]) where r is the rank of the dense tensor represented
by the sparse tensor. It means the shapes of the three tensors of the
SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here
is the shape of the SparseTensor.shape property. It must be the shape of
a vector.
b) If a loop variable is an IndexedSlices, the shape invariant must be
a shape invariant of the values tensor of the IndexedSlices. It means
the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],
[shape.ndims]).
`while_loop` implements non-strict semantics, enabling multiple iterations
to run in parallel. The maximum number of parallel iterations can be
controlled by `parallel_iterations`, which gives users some control over
memory consumption and execution order. For correct programs, `while_loop`
should return the same result for any parallel_iterations > 0.
For training, TensorFlow remembers the tensors that are produced in the
forward inference but needed in back propagation. These tensors can be a
main source of memory consumption and often cause OOM problems when training
on GPUs. When the flag swap_memory is true, we swap out these tensors from
GPU to CPU. This for example allows us to train RNN models with very long
sequences and large batches.
Args:
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple or list of numpy array, `Tensor`,
and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Returns:
The output tensors for the loop variables after the loop. When the length
of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlices and when
the length of `loop_vars` is greater than 1 it returns a list.
Raises:
TypeError: if `cond` or `body` is not callable.
ValueError: if `loop_vars` is empty.
Example:
```python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
```
Example with nesting:
```python
ijk_0 = (tf.constant(0), (tf.constant(1), tf.constant(2)))
c = lambda i, (j, k): i < 10
b = lambda i, (j, k): (i + 1, ((j + k), (j - k)))
ijk_final = tf.while_loop(c, b, ijk_0)
```
Example using shape_invariants:
```python
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
b = lambda i, m: [i+1, tf.concat(0, [m, m])]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
```
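Example using swap_memory (an illustrative sketch, with `c`, `b`, `i0` and
`m0` as in the previous example):
```python
# Forward tensors kept for backprop may be swapped out to CPU memory,
# trading transfer bandwidth for GPU memory on long loops.
r = tf.while_loop(c, b, loop_vars=[i0, m0], swap_memory=True)
```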
"""
with ops.name_scope(name, "while", loop_vars) as name:
if not loop_vars:
raise ValueError("No loop variables provided")
if not callable(cond):
raise TypeError("cond must be callable.")
if not callable(body):
raise TypeError("body must be callable.")
if shape_invariants is not None:
nest.assert_same_structure(loop_vars, shape_invariants)
context = WhileContext(parallel_iterations, back_prop, swap_memory, name)
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, context)
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
return result
def _AsTensorList(x, p):
"""Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
"""
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_indexed_slices(v)
if isinstance(v, ops.Tensor):
l.append(array_ops.identity(v))
else:
l.append(ops.IndexedSlices(array_ops.identity(v.values),
array_ops.identity(v.indices)))
return l
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." %
(x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be
consumed externally only after some other dependencies have run
first. This function returns `output_tensor`, but only after all
operations in `dependencies` have run. Note that only the returned
tensor is gated on the dependencies; there is no guarantee that
`output_tensor` itself will be evaluated after any `dependencies`
have run.
See also `tuple` and `group`.
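For example (a minimal sketch):
```python
v = tf.Variable(1.0)
update_op = tf.assign_add(v, 1.0)
t = tf.constant(42.0)
# Consuming `out` guarantees that `update_op` has run first; `t` itself
# is not re-gated.
out = with_dependencies([update_op], t)
```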
Args:
dependencies: A list of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
with ops.name_scope(name, "control_dependency",
dependencies + [output_tensor]) as name:
with ops.colocate_with(output_tensor):
with ops.control_dependencies(dependencies):
output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
if isinstance(output_tensor, ops.Tensor):
return _Identity(output_tensor, name=name)
else:
return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
def _GroupControlDeps(dev, deps, name=None):
with ops.control_dependencies(deps):
if dev is None:
return no_op(name=name)
else:
with ops.device(dev):
return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
When this op finishes, all ops in `inputs` have finished. This op has no
output.
See also `tuple` and `with_dependencies`.
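For example (a minimal sketch):
```python
va = tf.Variable(0.0)
vb = tf.Variable(0.0)
# `step` is an Operation with no output; running it runs both assigns.
step = group(tf.assign(va, 1.0), tf.assign(vb, 2.0))
```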
Args:
*inputs: Zero or more tensors to group.
**kwargs: Optional parameters to pass when constructing the NodeDef.
name: A name for this operation (optional).
Returns:
An Operation that executes all its inputs.
Raises:
ValueError: If an unknown keyword argument is provided.
"""
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
with ops.name_scope(name, "group_deps", inputs) as name:
# Grouping no inputs means do nothing
if not inputs:
return no_op(name=name)
# Sorts *inputs according to their devices.
ops_on_device = {} # device -> operations specified on the device.
for inp in inputs:
dev = inp.device
if dev in ops_on_device:
ops_on_device[dev].append(inp)
else:
ops_on_device[dev] = [inp]
if len(ops_on_device) == 1:
# 1-level tree. The root node is the returned NoOp node.
(dev, deps), = ops_on_device.items()
return _GroupControlDeps(dev, deps, name=name)
# 2-level tree. The root node is the returned NoOp node.
# deps contains 1 NoOp node for each device.
deps = []
def device_key(dev):
"""A sort key that allows None to be compared to strings."""
return "" if dev is None else dev
for dev in sorted(six.iterkeys(ops_on_device), key=device_key):
deps.append(_GroupControlDeps(dev, ops_on_device[dev]))
with ops.control_dependencies(deps):
return no_op(name=name)
def tuple(tensors, name=None, control_inputs=None):
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
argument, except that the value of each tensor is only returned after the
values of all tensors have been computed.
`control_inputs` contains additional ops that have to finish before this op
finishes, but whose outputs are not returned.
This can be used as a "join" mechanism for parallel computations: all the
argument tensors can be computed in parallel, but the values of any tensor
returned by `tuple` are only available after all the parallel computations
are done.
See also `group` and `with_dependencies`.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
Returns:
Same as `tensors`.
Raises:
ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
with ops.name_scope(name, "tuple", tensors) as name:
gating_ops = [t.op for t in tensors if t is not None]
if control_inputs:
for c in control_inputs:
if isinstance(c, ops.Tensor):
c = c.op
elif not isinstance(c, ops.Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
gating_ops.append(c)
# Note that in order to get a deterministic order in the pbtxt, we sort
# the gating ops by id here.
gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.
if not gating_ops:
raise ValueError("Must have at least one Tensor: %s" % tensors)
gate = group(*gating_ops)
tpl = []
for t in tensors:
if t is not None:
tpl.append(with_dependencies([gate], t))
else:
tpl.append(None)
return tpl
def case(pred_fn_pairs, default, exclusive=False, name="case"):
"""Create a case operation.
The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
Each pair contains a boolean scalar tensor and a python callable that
creates the tensors to be returned if the boolean evaluates to True.
`default` is a callable generating a list of tensors. All the callables
in `pred_fn_pairs` as well as `default` should return the same number
and types of tensors.
If `exclusive==True`, all predicates are evaluated, and a logging operation
with an error is returned if more than one of the predicates evaluates to
  True. If `exclusive==False`, execution stops at the first predicate that
evaluates to True, and the tensors generated by the corresponding function
are returned immediately. If none of the predicates evaluate to True, this
operation returns the tensors generated by `default`.
Example 1:
Pseudocode:
```
if (x < y) return 17;
else return 23;
```
Expressions:
```
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
r = case([(tf.less(x, y), f1)], default=f2)
```
Example 2:
Pseudocode:
```
if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
if (x < y) return 17;
else if (x > z) return 23;
else return -1;
```
Expressions:
```
x = tf.constant(0)
y = tf.constant(1)
z = tf.constant(2)
def f1(): return tf.constant(17)
def f2(): return tf.constant(23)
def f3(): return tf.constant(-1)
r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
default=f3, exclusive=True)
```
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: A callable that returns a list of tensors.
    exclusive: True iff at most one predicate is allowed to evaluate to True.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
pfp = pred_fn_pairs # For readability
if not (isinstance(pfp, list) or isinstance(pfp, _basetuple)
or isinstance(pfp, dict)):
raise TypeError("fns must be a list, tuple, or dict")
if isinstance(pfp, dict):
pfp = pfp.items()
if not exclusive:
logging.warn("%s: Provided dictionary of predicate/fn pairs, but "
"exclusive=False. Order of conditional tests is "
"not guaranteed.", name)
for tup in pfp:
if not isinstance(tup, _basetuple) or len(tup) != 2:
raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
pred, fn = tup
if pred.dtype != dtypes.bool:
raise TypeError("pred must be of type bool: %s", pred.name)
if not callable(fn):
raise TypeError("fn for pred %s must be callable." % pred.name)
if not callable(default):
raise TypeError("default must be callable.")
preds, fns = map(list, zip(*pfp))
with ops.name_scope(name, "case", [preds]):
if not preds:
return default()
not_preds = []
for i, p in enumerate(preds):
with ops.name_scope("not_%d" % i):
not_preds.append(math_ops.logical_not(p))
and_not_preds = [constant_op.constant(True, name="always_true")]
for i, notp in enumerate(not_preds):
with ops.name_scope("and_not_%d" % i):
and_not_preds.append(math_ops.logical_and(and_not_preds[-1], notp))
# preds = [p1, p2, p3]
# fns = [f1, f2, f3]
# not_preds = [~p1, ~p2, ~p3]
# and_not_preds = [True, ~p1, ~p1 & ~p2, ~p1 & ~p2 & ~p3]
# case_preds = [p1,
# p2 & ~p1,
# p3 & ~p2 & ~p1,
# ~p3 & ~p2 & ~p1]
case_preds = []
for i, (p, and_not_p_prev) in enumerate(zip(preds, and_not_preds[:-1])):
with ops.name_scope("case_%d" % i):
case_preds.append(math_ops.logical_and(p, and_not_p_prev))
with ops.name_scope("case_none_are_true"):
case_preds.append(and_not_preds[-1])
# Create an empty tensor, or list, with the right type and shape
with ops.name_scope("case_create_empty"):
dummy_value = default()
def _correct_empty(v):
if isinstance(v, ops.Operation):
return no_op()
elif v.dtype == dtypes.string:
return array_ops.constant("")
else:
return array_ops.constant(v.dtype.as_numpy_dtype())
if isinstance(dummy_value, collections.Sequence):
dummy_type = type(dummy_value)
empty = lambda: dummy_type(_correct_empty(v) for v in dummy_value)
else:
empty = lambda: _correct_empty(dummy_value)
# case_sequence = [
# cond(~p3 & ~p2 & ~p1, default, empty),
# cond(p3 & ~p2 & ~p1, f3, lambda: case_sequence[0]),
# cond(p2 & ~p1, f2, lambda: case_sequence[1]),
# cond(p1, f1, lambda: case_sequence[2])
# ]
#
# And the return value will be case_sequence[-1]
def _build_case():
all_fns = [fn for fn in fns]
all_fns.append(default)
prev_case = None
for i, (cp, fn) in enumerate(list(zip(case_preds, all_fns))[::-1]):
prev_case = cond(
cp, fn,
empty if i == 0 else lambda: prev_case,
name="If_%d" % i)
return prev_case
if exclusive:
preds_c = array_ops.pack(preds, name="preds_c")
num_true_conditions = math_ops.reduce_sum(
math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
at_most_one_true_condition = math_ops.less(
num_true_conditions, constant_op.constant(2, name="two_true_conds"))
error_msg = [
("More than one condition evaluated as True but "
"exclusive=True. Conditions: (%s), Values:"
% ", ".join([p.name for p in preds])),
preds_c]
with ops.control_dependencies([
Assert(condition=at_most_one_true_condition,
data=error_msg, summarize=len(preds))]):
case_seq = _build_case()
else:
case_seq = _build_case()
return case_seq
ops.RegisterShape("Enter")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Exit")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NextIteration")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RefEnter")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RefExit")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RefNextIteration")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ControlTrigger")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NoOp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Abort")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("LoopCond")
def _LoopCondShape(op):
"""Shape function for the LoopCond op."""
return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())]
ops.RegisterShape("Merge")(common_shapes.call_cpp_shape_fn)
def _MergeShape(op):
"""Shape function for the Merge op.
The Merge op takes many inputs of arbitrary shapes, and produces a
first output that is one of those inputs, and a second scalar
output.
If all input shapes are known and have the same rank, the output
shape must have that rank, otherwise the output shape is unknown.
  Each output dimension is specified only if that dimension is the same
  in all inputs.
Args:
op: A Merge Operation.
Returns:
    A two-element list containing the shape of the Merge op's output tensor
    and a scalar shape for its second (value index) output.
"""
output_shape = op.inputs[0].get_shape()
if output_shape.dims is None:
return [tensor_shape.unknown_shape(), tensor_shape.scalar()]
else:
for input_ in op.inputs[1:]:
input_shape = input_.get_shape()
if input_shape.dims is None or input_shape.ndims != output_shape.ndims:
return [tensor_shape.unknown_shape(), tensor_shape.scalar()]
else:
output_shape = tensor_shape.TensorShape(
[input_dim.value if input_dim.value == output_dim.value else None
for input_dim, output_dim in zip(input_shape.dims,
output_shape.dims)])
return [output_shape, tensor_shape.scalar()]
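# Worked example for _MergeShape (illustrative): inputs shaped [2, 3] and
# [2, None] share rank 2; dimension 0 agrees (both 2) while dimension 1 does
# not, so the result is [TensorShape([2, None]), scalar()]. A rank mismatch or
# an unknown input shape instead yields [unknown_shape(), scalar()].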
ops.RegisterShape("RefMerge")(_MergeShape)
ops.RegisterShape("RefSelect")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RefSwitch")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Switch")(common_shapes.call_cpp_shape_fn)
ops.register_proto_function(ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
| apache-2.0 |
shanemcd/ansible | lib/ansible/module_utils/facts/system/env.py | 232 | 1170 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.six import iteritems
from ansible.module_utils.facts.collector import BaseFactCollector
class EnvFactCollector(BaseFactCollector):
name = 'env'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
env_facts = {}
env_facts['env'] = {}
for k, v in iteritems(os.environ):
env_facts['env'][k] = v
return env_facts
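# Minimal usage sketch (illustrative; in practice Ansible's fact machinery
# instantiates and drives the collector rather than user code):
#   facts = EnvFactCollector().collect()
#   home = facts['env'].get('HOME')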
| gpl-3.0 |
junghans/espressopp | src/interaction/CoulombKSpaceEwald.py | 2 | 5870 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************************************************************
**CoulombKSpaceEwald** - Coulomb potential and interaction Objects (`K` space part)
*********************************************************************************************
.. math::
\frac{1}{2\pi V}
\sum_{m\in \mathbb{Z}^3 \atop 0<|m|<k_{max}}
\frac{exp(-\frac{\pi^2}{\alpha^2}m^{\prime 2})}{m^{\prime 2}}
\left\lvert\sum_{i=1}^{N}
q_{i}\cdot exp(2\pi i r_{i}\cdot m^{\prime})\right\rvert^{2}
This is the `K` space part of the potential of the long-range Coulomb interaction according to the
Ewald summation technique. A good explanation of the Ewald summation can be found in [Allen89]_ and
[Deserno98]_.
Example:
>>> ewaldK_pot = espressopp.interaction.CoulombKSpaceEwald(system, coulomb_prefactor, alpha, kspacecutoff)
>>> ewaldK_int = espressopp.interaction.CellListCoulombKSpaceEwald(system.storage, ewaldK_pot)
>>> system.addInteraction(ewaldK_int)
**!IMPORTANT** The Coulomb interaction needs the `R` space part as well, see CoulombRSpace_.
.. _CoulombRSpace: espressopp.interaction.CoulombRSpace.html
Definition:
It provides the potential object *CoulombKSpaceEwald* and the interaction object *CellListCoulombKSpaceEwald*
based on the all-particles list.
The *potential* is based on the system information (System_) and parameters:
Coulomb prefactor (coulomb_prefactor), Ewald parameter (alpha),
and the cutoff in K space (kspacecutoff).
.. _System: espressopp.System.html
>>> ewaldK_pot = espressopp.interaction.CoulombKSpaceEwald(system, coulomb_prefactor, alpha, kspacecutoff)
Potential Properties:
* *ewaldK_pot.prefactor*
The property 'prefactor' defines the Coulomb prefactor.
* *ewaldK_pot.alpha*
The property 'alpha' defines the Ewald parameter :math:`\\alpha`.
* *ewaldK_pot.kmax*
The property 'kmax' defines the cutoff in `K` space.
The *interaction* is based on the all-particles list. It needs the information from Storage_
and the `K` space part of the potential.
.. _Storage: espressopp.storage.Storage.html
>>> ewaldK_int = espressopp.interaction.CellListCoulombKSpaceEwald(system.storage, ewaldK_pot)
Interaction Methods:
* *getPotential()*
Access to the local potential.
Adding the interaction to the system:
>>> system.addInteraction(ewaldK_int)
References:
.. [Allen89] M.P.Allen, D.J.Tildesley, `Computer simulation of liquids`, *Clarendon Press*, **1989** 385 p.
.. [Deserno98] M.Deserno and C.Holm, *J. Chem. Phys.*, 109(18), **1998**, p.7678
.. function:: espressopp.interaction.CoulombKSpaceEwald(system, prefactor, alpha, kmax)
:param system:
:param prefactor:
:param alpha:
:param kmax:
:type system:
:type prefactor:
:type alpha:
:type kmax:
.. function:: espressopp.interaction.CellListCoulombKSpaceEwald(storage, potential)
:param storage:
:param potential:
:type storage:
:type potential:
.. function:: espressopp.interaction.CellListCoulombKSpaceEwald.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.CellListCoulombKSpaceEwald.getPotential()
:rtype:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_CoulombKSpaceEwald, \
interaction_CellListCoulombKSpaceEwald
class CoulombKSpaceEwaldLocal(PotentialLocal, interaction_CoulombKSpaceEwald):
def __init__(self, system, prefactor, alpha, kmax):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CoulombKSpaceEwald, system, prefactor, alpha, kmax)
class CellListCoulombKSpaceEwaldLocal(InteractionLocal, interaction_CellListCoulombKSpaceEwald):
def __init__(self, storage, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListCoulombKSpaceEwald, storage, potential)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return []
def getPotential(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self)
if pmi.isController:
class CoulombKSpaceEwald(Potential):
pmiproxydefs = dict(
cls = 'espressopp.interaction.CoulombKSpaceEwaldLocal',
pmiproperty = ['prefactor', 'alpha', 'kmax']
)
class CellListCoulombKSpaceEwald(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListCoulombKSpaceEwaldLocal',
pmicall = ['getFixedPairList','getPotential']
)
| gpl-3.0 |
Yanjing123/myicons | fontbuilder/renderers.py | 3 | 4943 | import zipfile
import plistlib
import tempfile
import fontforge
from django.template.loader import render_to_string
from rest_framework import renderers
from .utils import minify_css
from .ttf2eot import ttf2eot
class FontCSSRenderer(renderers.BaseRenderer):
media_type = 'text/css'
format = 'css'
charset = 'utf8'
production = False
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
data['production'] = self.production
icons = data['icons']
data['classnames'] = ', '.join('.' + icon['classname'] for icon in icons)
return render_to_string('fontcss.css', data)
class FontCheatSheetRenderer(renderers.BaseRenderer):
media_type = 'text/html'
format = 'html'
charset = 'utf8'
production = False
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
data['production'] = self.production
return render_to_string('fontcheatsheet.html', data)
class PListRenderer(renderers.BaseRenderer):
media_type = 'application/x-plist'
format = 'plist'
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
icons = data['icons']
icondict = {icon['name']: icon['svg_d'] for icon in icons}
return plistlib.writePlistToString(icondict)
class SVGFontRenderer(renderers.BaseRenderer):
media_type = 'text/svg+xml'
format = 'svg'
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
return render_to_string('svgfont.svg', data)
class BinaryFontRenderer(SVGFontRenderer):
media_type = 'application/octet-stream'
charset = None
render_style = 'binary'
svgfile = None
def get_svgfile(self, data):
if self.svgfile: return self.svgfile
svgtext = SVGFontRenderer.render(self, data)
svgfile = tempfile.NamedTemporaryFile(suffix='.svg')
svgfile.write(svgtext)
svgfile.flush()
self.svgfile = svgfile
return self.svgfile
def gen_binaryfont(self, fileformat, font):
fontdata = ''
tempfontfile = tempfile.NamedTemporaryFile(suffix=('.' + fileformat))
with tempfontfile:
font.generate(tempfontfile.name)
fontdata = tempfontfile.read()
return fontdata
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
svgfile = self.get_svgfile(data)
font = fontforge.open(svgfile.name)
fontdata = self.gen_binaryfont(self.format, font)
font.close()
svgfile.close()
return fontdata
class WOFFRenderer(BinaryFontRenderer):
format = 'woff'
class TTFRenderer(BinaryFontRenderer):
format = 'ttf'
class EOTRenderer(BinaryFontRenderer):
format = 'eot'
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
svgfile = self.get_svgfile(data)
font = fontforge.open(svgfile.name)
fontdata = self.gen_binaryfont('ttf', font)
font.close()
svgfile.close()
return ttf2eot(fontdata)
class ZIPPackRenderer(BinaryFontRenderer, FontCSSRenderer, FontCheatSheetRenderer):
media_type = 'application/zip'
charset = None
render_style = 'binary'
format = 'zip'
production = True
def render(self, data, media_type=None, render_context=None):
if render_context and render_context.get('response').status_code != 200: return ''
svgfile = self.get_svgfile(data)
font = fontforge.open(svgfile.name)
ttf = self.gen_binaryfont('ttf', font)
woff = self.gen_binaryfont('woff', font)
packfile = tempfile.TemporaryFile()
pack = zipfile.ZipFile(packfile, 'w')
build_name = data['build_name']
pack.writestr(('fonts/%s.ttf' % build_name), ttf)
pack.writestr(('fonts/%s.eot' % build_name), ttf2eot(ttf))
pack.writestr(('fonts/%s.woff' % build_name), woff)
pack.write(svgfile.name, ('fonts/%s.svg' % build_name))
css = FontCSSRenderer.render(self, data, media_type, render_context)
cheatsheet = FontCheatSheetRenderer.render(self, data, media_type, render_context)
pack.writestr(('css/%s.css' % build_name), css)
pack.writestr(('css/%s.min.css' % build_name), minify_css(css))
pack.writestr('cheatsheet.html', cheatsheet)
pack.close()
packfile.seek(0)
ret = packfile.read()
packfile.close()
return ret
| bsd-2-clause |
PCManticore/argus-ci | argus/backends/base.py | 3 | 4663 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import six
from argus import util
LOG = util.get_logger()
@six.add_metaclass(abc.ABCMeta)
class BaseBackend(object):
"""Class for managing instances
The *backend* is used for building and managing an underlying
    instance, be it an OpenStack instance, an OpenNebula instance,
or a containerized OS.
:param conf:
A configuration object, which holds argus related info.
:param name:
The name of the instance that will be created.
:param userdata:
If any, the userdata which will be available in the
instance to the corresponding cloud initialization
service.
:param metadata:
If any, the metadata which should be available in the
instance to the corresponding cloud initialization
service.
"""
def __init__(self, conf, name=None, userdata=None, metadata=None,
availability_zone=None):
self._name = name
self._conf = conf
self._availability_zone = availability_zone
self.userdata = userdata
self.metadata = metadata
@abc.abstractmethod
def setup_instance(self):
"""Setup an underlying instance."""
@abc.abstractmethod
def cleanup(self):
"""Destroy and cleanup the relevant resources created by :meth:`setup_instance`."""
@abc.abstractmethod
def get_remote_client(self, **kwargs):
"""Get a remote client to the underlying instance."""
@abc.abstractproperty
def remote_client(self):
"""An astract property which should return the default client."""
class CloudBackend(BaseBackend):
"""Base backend for cloud related tasks."""
@abc.abstractmethod
def get_remote_client(self, username=None, password=None, **kwargs):
"""Get a remote client
        This is different from :attr:`remote_client`, because that property
        always returns a client with predefined credentials, while this
        method allows fine-grained control over the credentials used.
`password` can be omitted if authentication by SSH key is used.
The **kwargs parameter can be used for additional options (currently none).
"""
@staticmethod
def _get_log_template(suffix):
template = "{}{}.log".format("{}", "-" + suffix if suffix else "")
return template
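    # Worked example (illustrative): _get_log_template("console") returns
    # "{}-console.log"; save_instance_output later fills the remaining "{}"
    # with internal_instance_id(), e.g. "i-0abc-console.log". With no suffix
    # the template is simply "{}.log".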
def save_instance_output(self, suffix=None):
"""Retrieve and save all data written through the COM port.
        If a `suffix` is provided, it is appended to the log name (before the extension).
"""
if not self._conf.argus.output_directory:
return
template = self._get_log_template(suffix)
path = os.path.join(self._conf.argus.output_directory,
template.format(self.internal_instance_id()))
content = self.instance_output()
if not content.strip():
LOG.warn("Empty console output; nothing to save.")
return
LOG.info("Saving instance console output to: %s", path)
with open(path, "wb") as stream:
stream.write(content)
@abc.abstractmethod
def instance_output(self, limit=None):
"""Get the underlying's instance output, if any.
:param limit:
Number of lines to fetch from the end of console log.
"""
@abc.abstractmethod
def internal_instance_id(self):
"""Get the underlying's instance id, depending on the internals of the backend."""
@abc.abstractmethod
def reboot_instance(self):
"""Reboot the underlying instance."""
@abc.abstractmethod
def instance_password(self):
"""Get the underlying instance password, if any."""
@abc.abstractmethod
def private_key(self):
"""Get the underlying private key."""
@abc.abstractmethod
def public_key(self):
"""Get the underlying public key."""
@abc.abstractmethod
def floating_ip(self):
"""Get the floating ip that was attached to the underlying instance."""
| apache-2.0 |
smartbgp/libbgp | libbgp/tests/unit/net/test_family.py | 1 | 1329 | # Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from libbgp.net.family import Family, AFI, SAFI
class TestFamily(unittest.TestCase):
def test_str(self):
self.assertEqual('ipv4-unicast', str(Family(AFI.INET, SAFI.UNICAST)))
self.assertEqual('ipv4-label_unicast', str(Family(AFI.INET, SAFI.MPLS_LABEL)))
self.assertEqual('l2vpn-evpn', str(Family(AFI.L2VPN, SAFI.EVPN)))
self.assertEqual('ipv4-flowspec', str(Family(AFI.INET, SAFI.FLOWSPEC)))
self.assertEqual('ipv4-mplsvpn', str(Family(AFI.INET, SAFI.MPLS_VPN)))
self.assertEqual('linkstate-linkstate', str(Family(AFI.BGPLS, SAFI.BGPLS)))
self.assertEqual('ipv6-mplsvpn', str(Family(AFI.INET6, SAFI.MPLS_VPN)))
| apache-2.0 |
mancoast/CPythonPyc_test | fail/300_test_threading_local.py | 5 | 2646 | import unittest
from doctest import DocTestSuite
from test import support
import threading
import weakref
import gc
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class ThreadingLocalTest(unittest.TestCase):
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = threading.local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX threading.local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertEqual(len(deadlist), n-1)
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assert_(len(deadlist) in (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(threading.local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadingLocalTest))
try:
from thread import _local
except ImportError:
pass
else:
import _threading_local
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
support.run_unittest(suite)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
zaxliu/deepnap | experiments/kdd-exps/experiment_QNN_legacy_template.py | 1 | 4398 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = "Default"
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
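# Mixin note (an assumption about the project's cooperative constructors, not
# verified here): with this MRO, PhiMixin.__init__ runs first, consumes its own
# keyword arguments (e.g. phi_length) and forwards the remaining **kwargs to
# QAgentNN.__init__ through super().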
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = None # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:20:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/distutils/command/install_scripts.py | 241 | 2068 | """distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Python scripts."""
# contributed by Bastian Kleineidam
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.build_dir = None
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run (self):
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs (self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
# class install_scripts
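# Worked example of the mode arithmetic above (illustrative): os.stat() returns
# an ST_MODE that includes file-type bits, e.g. 0100644 for a regular
# rw-r--r-- file; (0100644 | 0555) & 07777 == 0755, so the OR grants
# read+execute to owner, group and world, and the 07777 mask keeps only the
# permission bits that chmod expects.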
| gpl-3.0 |
lanen/youtube-dl | youtube_dl/extractor/defense.py | 123 | 1240 | from __future__ import unicode_literals
from .common import InfoExtractor
class DefenseGouvFrIE(InfoExtractor):
IE_NAME = 'defense.gouv.fr'
_VALID_URL = r'http://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'
_TEST = {
'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
'md5': '75bba6124da7e63d2d60b5244ec9430c',
'info_dict': {
'id': '11213',
'ext': 'mp4',
'title': 'attaque-chimique-syrienne-du-21-aout-2013-1'
}
}
def _real_extract(self, url):
title = self._match_id(url)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r"flashvars.pvg_id=\"(\d+)\";",
webpage, 'ID')
json_url = (
'http://static.videos.gouv.fr/brightcovehub/export/json/%s' %
video_id)
info = self._download_json(json_url, title, 'Downloading JSON config')
video_url = info['renditions'][0]['url']
return {
'id': video_id,
'ext': 'mp4',
'url': video_url,
'title': title,
}
| unlicense |
campbe13/openhatch | vendor/packages/Django/tests/modeltests/ordering/models.py | 114 | 1139 | """
6. Specifying ordering
Specify default ordering for a model using the ``ordering`` attribute, which
should be a list or tuple of field names. This tells Django how to order
``QuerySet`` results.
If a field name in ``ordering`` starts with a hyphen, that field will be
ordered in descending order. Otherwise, it'll be ordered in ascending order.
The special-case field name ``"?"`` specifies random order.
The ordering attribute is not required. If you leave it off, ordering will be
undefined -- not random, just undefined.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
@python_2_unicode_compatible
class ArticlePKOrdering(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pk',)
def __str__(self):
return self.headline
| agpl-3.0 |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/eagle/plotdensitycurves.py | 2 | 4043 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.plotdensitycurves Plot stellar, gas and dust
# densities as a function of the galaxy radius for an EAGLE SKIRT-run.
#
# The facilities in this module serve to plot stellar, gas and dust
# densities as a function of the galaxy radius for a particular EAGLE SKIRT-run.
# The data is calculated based on the SPH particles in the input files,
# assuming all mass is concentrated at a particle's center position.
# ----------------------------------------------------------------------
# use a non-interactive back-end to generate high-quality vector graphics
import matplotlib.pyplot as plt
# import standard modules
import os.path
import numpy as np
# import pts modules
from ..core.tools import archive as arch
# ----------------------------------------------------------------------
# load columns text file in given directory and with name ending with given extension
def loadfile(inpath, extension):
filenames = arch.listdir(inpath, extension)
if len(filenames)!=1: raise ValueError("input file not found")
filepath = os.path.join(inpath, filenames[0])
return np.loadtxt(arch.opentext(filepath), unpack=True)
# ----------------------------------------------------------------------
## This function creates a PDF plot with histograms of stellar, gas and dust
# densities as a function of the galaxy radius for a particular EAGLE SKIRT-run.
# The data is calculated based on the SPH particles in the input files,
# assuming all mass is concentrated at a particle's center position.
# The output plot is placed in the SKIRT-run's visualization directory.
def plotdensitycurves(skirtrun):
# setup the figure
figure = plt.figure(figsize=(10,6))
rmax = 50 # kpc
# load and plot the stars
x,y,z,h,M,Z,t = loadfile(skirtrun.inpath(), "_stars.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
plt.hist(r, weights=M, bins=25, range=(0,rmax), histtype='step', log=True, color='b', label="stars")
# load and plot the gas
x,y,z,h,Mgas,Z,T = loadfile(skirtrun.inpath(), "_gas.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
M = Mgas.copy()
M[np.abs(T)>75000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='dashed', label="metals (T<75000K)")
M = Mgas.copy()
M[np.abs(T)>8000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='dotted', label="metals (T<8000K)")
M = Mgas.copy()
M[T>8000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='solid', label="metals (T<8000K or SFR>0)")
# load and plot the hii regions
try:
x,y,z,h,SFR,Z,logC,P,fPDR = loadfile(skirtrun.inpath(), "_hii.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
plt.hist(r, weights=SFR*1e7, bins=25, range=(0,rmax), histtype='step', log=True, color='c', label="hii regions")
except ValueError:
pass
# add axis labels, legend and title
plt.grid('on')
plt.xlabel("r (kpc)", fontsize='medium')
plt.ylabel("Mass (Msun)", fontsize='medium')
plt.ylim(1e4, 1e9)
plt.legend(loc='upper right', prop={'size':'small'})
plt.title("runid {} -- {}".format(skirtrun.runid(), skirtrun.prefix()), fontsize='medium')
# save the figure
plotpath = os.path.join(skirtrun.vispath(), skirtrun.prefix()+"_density_curves.pdf")
plt.savefig(plotpath, bbox_inches='tight', pad_inches=0.25)
plt.close()
print "Created PDF plot file " + plotpath
# ----------------------------------------------------------------------
| mit |
savoirfairelinux/odoo | addons/anonymization/__init__.py | 441 | 1080 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import anonymization
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bootphon/abkhazia | abkhazia/commands/abkhazia_acoustic.py | 1 | 10116 | # Copyright 2016 Thomas Schatz, Xuan-Nga Cao, Mathieu Bernard
#
# This file is part of abkhazia: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abkhazia is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with abkhazia. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the 'abkhazia acoustic' command"""
import argparse
import os
import textwrap
import abkhazia.acoustic as acoustic
import abkhazia.utils as utils
import abkhazia.kaldi as kaldi
from abkhazia.commands.abstract_command import AbstractKaldiCommand
from abkhazia.corpus import Corpus
class _AmBase(AbstractKaldiCommand):
# name of subcommand in command-line
name = NotImplemented
# one line description of the subcommand
description = NotImplemented
# multiline detailed description
_long_description = NotImplemented
# linked acoustic model class in abkhazia.models.acoustic
am_class = NotImplemented
# because models are successive processings, need to reference the
# previous step. Must be a tuple (short, long), e.g. ('feats',
# 'features')
prev_step = NotImplemented
@classmethod
def long_description(cls):
return textwrap.dedent(cls._long_description)
@classmethod
def add_parser(cls, subparsers):
parser, dir_group = super(_AmBase, cls).add_parser(
subparsers, name=cls.name)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = cls.long_description()
        lex = parser.add_argument_group('lexicon parameters')
lex.add_argument(
'-s', '--silence-probability', default=0.5,
metavar='<float>', type=float,
help='usually 0.0 or 0.5, default is %(default)s')
lex.add_argument(
'-w', '--word-position-dependent', action='store_true',
            help='''If specified, the produced language model is intended to be used
            with an acoustic model trained with word-position-dependent
            variants of the phones.''')
lex.add_argument(
'-l', '--lang-level', default='word',
help="compute the AM either at phone-level or word-level, "
"default is '%(default)s'",
metavar='<phone|word>', choices=['phone', 'word'])
dir_group.add_argument(
'-f', '--features', metavar='<feats-dir>', default=None,
help='')
if cls.prev_step:
# if not monophone, add a --input-dir option to specify
# input acoustic model
dir_group.add_argument(
'-i', '--input-dir',
metavar='<{}-dir>'.format(cls.prev_step[0]),
help='''the input directory, data is read
from <{0}-dir>/{1}, if not specified use <{0}-dir>=<corpus>.'''
.format(cls.prev_step[0], cls.prev_step[1]))
# add parameters for Kaldi options
kaldi_group = parser.add_argument_group('training parameters')
kaldi.options.add_options(kaldi_group, cls.am_class.options)
return parser
@classmethod
def run(cls, args):
corpus_dir, output_dir = cls._parse_io_dirs(args)
log = utils.logger.get_log(
os.path.join(output_dir, '{}.log'.format(cls.name)),
verbose=args.verbose)
corpus = Corpus.load(corpus_dir, validate=args.validate, log=log)
# get back the features directory TODO use cls._parse_aux_dir
feats = (os.path.join(os.path.dirname(corpus_dir), 'features')
if args.features is None
else os.path.abspath(args.features))
# pack the prepare_lang related arguments
lang_args = {
'level': args.lang_level,
'silence_probability': args.silence_probability,
'position_dependent_phones': args.word_position_dependent,
'keep_tmp_dirs': True if args.recipe else False}
# instanciate and setup the kaldi recipe with standard args
if cls.am_class is not acoustic.Monophone:
# get back the input directory
input_dir = (
os.path.join(os.path.dirname(corpus_dir),
'{}'.format(cls.prev_step[1]))
if args.input_dir is None
else os.path.abspath(args.input_dir))
recipe = cls.am_class(
corpus, feats, input_dir, output_dir, lang_args, log=log)
else: # special case of monophone models
if args.alignment:
cls.am_class = acoustic.MonophoneFromAlignment
recipe = cls.am_class(
corpus, feats, output_dir, lang_args,
args.alignment, log=log)
else:
recipe = cls.am_class(
corpus, feats, output_dir, lang_args, log=log)
recipe.njobs = args.njobs
if args.recipe:
recipe.delete_recipe = False
# setup the model options parsed from command line
for k, v in vars(args).items():
try:
recipe.set_option(k.replace('_', '-'), v)
except KeyError:
pass
except AttributeError:
pass
# finally train the acoustic model
recipe.compute()
class _AmMono(_AmBase):
name = 'monophone'
description = 'Monophone HMM-GMM acoustic model'
am_class = acoustic.Monophone
prev_step = None # monophone is built directly on features
_long_description = '''
Training a monophone HMM-GMM acoustic model on a corpus, with
attached features (<feat-dir> option).
Other training options, such as the number of
Gaussians or iterations, are specified in the "training
parameters" section (see below).
    The trained model is written to a directory specified by the
<output-dir> option. It can then feed the "abkhazia align",
"abkhazia decode" or "abkhazia acoustic triphone" commands.'''
@classmethod
def add_parser(cls, subparsers):
parser = super(_AmMono, cls).add_parser(subparsers)
parser.add_argument(
'-a', '--alignment', metavar='<file>', default=None,
help='Provide a phone alignment (by default it is estimated '
'during training iterations), when this option is in use, the '
'option --realign-iterations is ignored')
class _AmTri(_AmBase):
name = 'triphone'
description = 'Triphone HMM-GMM acoustic model'
am_class = acoustic.Triphone
prev_step = ('mono', 'monophone')
_long_description = '''
Training a triphone HMM-GMM acoustic model on a corpus.
The model is trained on a monophone model, coming from the
"abkhazia acoustic monophone" command and specified by the
<mono-dir> option.
Other training options, such as the number of Gaussians or
iterations, are specified in the "training parameters" section
(see below).
    The trained model is written to a directory specified by the
<output-dir> option. It can then feed the "abkhazia align",
"abkhazia decode" or "abkhazia acoustic triphone-sa"
commands.'''
class _AmTriSa(_AmBase):
name = 'triphone-sa'
description = 'Triphone speaker adaptive HMM-GMM acoustic model'
am_class = acoustic.TriphoneSpeakerAdaptive
prev_step = ('tri', 'triphone')
_long_description = '''
Training a triphone speaker adaptive HMM-GMM acoustic model on a
corpus.
The model is trained on a triphone model, coming from the
"abkhazia acoustic triphone" command and specified by the
<tri-dir> option.
Other training options, such as the number of Gaussians or
iterations, are specified in the "training parameters" section
(see below).
    The trained model is written to a directory specified by the
<output-dir> option. It can then feed the "abkhazia align",
"abkhazia decode" or "abkhazia acoustic triphone-dnn"
commands.'''
class _AmDnn(_AmBase):
name = 'nnet'
description = 'HMM-DNN acoustic model'
am_class = acoustic.NeuralNetwork
prev_step = ('am', 'acoustic-model')
_long_description = '''
    Training a neural network with pnorm nonlinearities on a
corpus.
The model is trained on top of a previously computed HMM-GMM
acoustic model, specified by the <am-dir> option.
    The trained model is written to a directory specified by the
<output-dir> option. It can then feed the "abkhazia align" or
"abkhazia decode" commands.
See http://kaldi-asr.org/doc/dnn2.html for details on the DNN
recipe implementation '''
class AbkhaziaAcoustic(object):
name = 'acoustic'
description = 'train acoustic models from corpus, features and LM'
_commands = [_AmMono, _AmTri, _AmTriSa, _AmDnn]
@classmethod
def add_parser(cls, subparsers):
"""Return a parser for the 'abkhazia acoustic' command
Add a subparser and help message for 'monophone', 'triphone',
'triphone-sa' and 'nnet' subcommands.
"""
parser = subparsers.add_parser(cls.name)
parser.formatter_class = argparse.RawTextHelpFormatter
subparsers = parser.add_subparsers(
metavar='<command>',
help='possible commands are:\n' + '\n'.join(
(' {} - {}'.format(
c.name + ' '*(11-len(c.name)), c.description)
for c in cls._commands)))
for command in cls._commands:
command.add_parser(subparsers)
return parser
| gpl-3.0 |
pitunti/alfaPitunti | plugin.video.alfa/core/tvdb.py | 1 | 40140 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# tvdb
# ------------------------------------------------------------
# Scraper para el site thetvdb.com usando API v2.1
# Utilizado para obtener datos de series para la videoteca
# del addon y también Kodi.
# ------------------------------------------------------------
import re
import urllib2
from core import jsontools
from core import scrapertools
from core.item import InfoLabels
from platformcode import config, logger
from platformcode import platformtools
HOST = "https://api.thetvdb.com"
HOST_IMAGE = "http://thetvdb.com/banners/"
TOKEN = config.get_setting("tvdb_token", default="")
DEFAULT_LANG = "es"
DEFAULT_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json, application/vnd.thetvdb.v2.1.1',
'Accept-Language': DEFAULT_LANG,
'Authorization': 'Bearer ' + TOKEN,
}
# Traducciones - Inicio
DICT_STATUS = {'Continuing': 'En emisión', 'Ended': 'Finalizada'}
DICT_GENRE = {
'Action': 'Acción',
'Adventure': 'Aventura',
'Animation': 'Animación',
'Children': 'Niños',
'Comedy': 'Comedia',
'Crime': 'Crimen',
'Documentary': 'Documental',
# 'Drama': 'Drama',
'Family': 'Familiar',
'Fantasy': 'Fantasía',
'Food': 'Comida',
'Game Show': 'Concurso',
'Home and Garden': 'Hogar y Jardín',
# 'Horror': 'Horror', 'Mini-Series': 'Mini-Series',
'Mystery': 'Misterio',
'News': 'Noticias',
# 'Reality': 'Telerrealidad',
'Romance': 'Romántico',
'Science-Fiction': 'Ciencia-Ficción',
'Soap': 'Telenovela',
# 'Special Interest': 'Special Interest',
'Sport': 'Deporte',
# 'Suspense': 'Suspense',
'Talk Show': 'Programa de Entrevistas',
# 'Thriller': 'Thriller',
'Travel': 'Viaje',
# 'Western': 'Western'
}
DICT_MPAA = {'TV-Y': 'Público pre-infantil: niños menores de 6 años', 'TV-Y7': 'Público infantil: desde 7 años',
'TV-G': 'Público general: sin supervisión familiar', 'TV-PG': 'Guía paterna: Supervisión paternal',
'TV-14': 'Mayores de 14 años', 'TV-MA': 'Mayores de 17 años'}
# Translations - End
otvdb_global = None
def find_and_set_infoLabels(item):
logger.info()
# logger.info("item es %s" % item)
p_dialog = None
if not item.contentSeason:
p_dialog = platformtools.dialog_progress_bg("Buscando información de la serie", "Espere por favor...")
global otvdb_global
tvdb_result = None
title = item.contentSerieName
    # If the title includes the (year), strip it
year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$")
if year:
title = title.replace(year, "").strip()
item.infoLabels['year'] = year[1:-1]
if not item.infoLabels.get("tvdb_id"):
if not item.infoLabels.get("imdb_id"):
otvdb_global = Tvdb(search=title, year=item.infoLabels['year'])
else:
otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id"))
elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']:
otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
if not item.contentSeason:
p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...")
results, info_load = otvdb_global.get_list_results()
logger.debug("results es %s" % results)
if not item.contentSeason:
p_dialog.update(100, "Buscando información de la serie", "Encontrados %s posibles coincidencias" % len(results))
p_dialog.close()
if len(results) > 1:
tvdb_result = platformtools.show_video_info(results, item=item, scraper=Tvdb,
caption="[%s]: Selecciona la serie correcta" % title)
elif len(results) > 0:
tvdb_result = results[0]
    # TODO: review
if isinstance(item.infoLabels, InfoLabels):
logger.debug("es instancia de infoLabels")
infoLabels = item.infoLabels
else:
logger.debug("NO ES instancia de infoLabels")
infoLabels = InfoLabels()
if tvdb_result:
infoLabels['tvdb_id'] = tvdb_result['id']
infoLabels['url_scraper'] = ["http://thetvdb.com/index.php?tab=series&id=%s" % infoLabels['tvdb_id']]
if not info_load:
if otvdb_global.get_id() != infoLabels['tvdb_id']:
otvdb_global = Tvdb(tvdb_id=infoLabels['tvdb_id'])
otvdb_global.get_images(infoLabels['tvdb_id'], image="poster")
otvdb_global.get_images(infoLabels['tvdb_id'], image="fanart")
otvdb_global.get_tvshow_cast(infoLabels['tvdb_id'])
item.infoLabels = infoLabels
set_infoLabels_item(item)
return True
else:
item.infoLabels = infoLabels
return False
def set_infoLabels_item(item):
"""
Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
@param item: Objeto que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado
incluyendo los datos extras localizados.
@type item: Item
"""
global otvdb_global
def __leer_datos(otvdb_aux):
item.infoLabels = otvdb_aux.get_infoLabels(item.infoLabels)
if 'infoLabels' in item and 'thumbnail' in item.infoLabels:
item.thumbnail = item.infoLabels['thumbnail']
if 'infoLabels' in item and 'fanart' in item.infoLabels['fanart']:
item.fanart = item.infoLabels['fanart']
if 'infoLabels' in item and 'season' in item.infoLabels:
try:
int_season = int(item.infoLabels['season'])
except ValueError:
logger.debug("El numero de temporada no es valido")
item.contentType = item.infoLabels['mediatype']
return -1 * len(item.infoLabels)
if not otvdb_global or \
(item.infoLabels['tvdb_id'] and otvdb_global.get_id() != item.infoLabels['tvdb_id']) \
or (otvdb_global.search_name and otvdb_global.search_name != item.infoLabels['tvshowtitle']):
if item.infoLabels['tvdb_id']:
otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
else:
otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle'])
__leer_datos(otvdb_global)
if item.infoLabels['episode']:
try:
int_episode = int(item.infoLabels['episode'])
except ValueError:
logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode']))
item.contentType = item.infoLabels['mediatype']
return -1 * len(item.infoLabels)
        # We have valid season and episode numbers...
        # ... look up the episode data
item.infoLabels['mediatype'] = 'episode'
lang = DEFAULT_LANG
if otvdb_global.lang:
lang = otvdb_global.lang
page = 1
_id = None
while not _id:
list_episodes = otvdb_global.list_episodes.get(page)
if not list_episodes:
list_episodes = otvdb_global.get_list_episodes(otvdb_global.get_id(), page)
import threading
semaforo = threading.Semaphore(20)
l_hilo = list()
for e in list_episodes["data"]:
t = threading.Thread(target=otvdb_global.get_episode_by_id, args=(e["id"], lang, semaforo))
t.start()
l_hilo.append(t)
            # wait for all threads to finish
for x in l_hilo:
x.join()
for e in list_episodes['data']:
if e['airedSeason'] == int_season and e['airedEpisodeNumber'] == int_episode:
_id = e['id']
break
_next = list_episodes['links']['next']
if type(_next) == int:
page = _next
else:
break
data_episode = otvdb_global.get_info_episode(otvdb_global.get_id(), int_season, int_episode, lang, _id)
        # TODO: review which values should be inserted into infoLabels
if data_episode:
item.infoLabels['title'] = data_episode['episodeName']
            # fix for cases where the API field was null --> None
if data_episode["overview"] is not None:
item.infoLabels['plot'] = data_episode["overview"]
item.thumbnail = HOST_IMAGE + data_episode.get('filename', "")
item.infoLabels["rating"] = data_episode.get("siteRating", "")
item.infoLabels['director'] = ', '.join(sorted(data_episode.get('directors', [])))
item.infoLabels['writer'] = ', '.join(sorted(data_episode.get("writers", [])))
if data_episode["firstAired"]:
item.infoLabels['premiered'] = data_episode["firstAired"].split("-")[2] + "/" + \
data_episode["firstAired"].split("-")[1] + "/" + \
data_episode["firstAired"].split("-")[0]
item.infoLabels['aired'] = item.infoLabels['premiered']
guest_stars = data_episode.get("guestStars", [])
l_castandrole = item.infoLabels.get("castandrole", [])
l_castandrole.extend([(p, '') for p in guest_stars])
item.infoLabels['castandrole'] = l_castandrole
            # data for the nfo file
item.season_id = data_episode["airedSeasonID"]
item.episode_id = data_episode["id"]
return len(item.infoLabels)
else:
        # We have a valid season number but no episode number...
        # ... look up the season data
item.infoLabels['mediatype'] = 'season'
data_season = otvdb_global.get_images(otvdb_global.get_id(), "season", int_season)
if data_season and 'image_season_%s' % int_season in data_season:
item.thumbnail = HOST_IMAGE + data_season['image_season_%s' % int_season][0]['fileName']
return len(item.infoLabels)
    # Search...
else:
        # Searches by ID...
if (not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']) and item.infoLabels['tvdb_id']:
otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
elif not otvdb_global and item.infoLabels['imdb_id']:
otvdb_global = Tvdb(imdb_id=item.infoLabels['imdb_id'])
elif not otvdb_global and item.infoLabels['zap2it_id']:
otvdb_global = Tvdb(zap2it_id=item.infoLabels['zap2it_id'])
        # Could not search by ID... fall back to searching by title
if otvdb_global is None:
otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle'])
if otvdb_global and otvdb_global.get_id():
__leer_datos(otvdb_global)
            # The search found a valid result
return len(item.infoLabels)
def get_nfo(item):
"""
Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi,
@param item: elemento que contiene los datos necesarios para generar la info
@type item: Item
@rtype: str
@return:
"""
if "season" in item.infoLabels and "episode" in item.infoLabels:
info_nfo = "http://thetvdb.com/?tab=episode&seriesid=%s&seasonid=%s&id=%s\n" \
% (item.infoLabels['tvdb_id'], item.season_id, item.episode_id)
else:
info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n"
return info_nfo
def completar_codigos(item):
"""
If necessary, checks whether the tmdb identifier exists and, if it does not, tries to look it up
@param item: item object
@type item: Item
"""
if not item.infoLabels['tmdb_id']:
listsources = [(item.infoLabels['tvdb_id'], "tvdb_id")]
if item.infoLabels['imdb_id']:
listsources.append((item.infoLabels['imdb_id'], "imdb_id"))
from core.tmdb import Tmdb
ob = Tmdb()
for external_id, external_source in listsources:
ob.search_by_id(id=external_id, source=external_source, tipo='tv')
item.infoLabels['tmdb_id'] = ob.get_id()
if item.infoLabels['tmdb_id']:
url_scraper = "https://www.themoviedb.org/tv/%s" % item.infoLabels['tmdb_id']
item.infoLabels['url_scraper'].append(url_scraper)
break
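# Illustrative usage sketch (not part of the original module). The infoLabels
# values below are hypothetical; only completar_codigos() itself is real here:
#
#     item.infoLabels = {'tvdb_id': '81189', 'imdb_id': 'tt0903747',
#                        'tmdb_id': '', 'url_scraper': []}
#     completar_codigos(item)
#     # If TMDB recognizes either external id, item.infoLabels['tmdb_id'] is
#     # filled in and a themoviedb.org URL is appended to 'url_scraper'.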
class Tvdb:
def __init__(self, **kwargs):
self.__check_token()
self.result = {}
self.list_results = []
self.lang = ""
self.search_name = kwargs['search'] = \
re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', kwargs.get('search', ''))
self.list_episodes = {}
self.episodes = {}
if kwargs.get('tvdb_id', ''):
# Search by tvdb identifier
self.__get_by_id(kwargs.get('tvdb_id', ''))
if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"):
from platformcode import platformtools
platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG,
"Se busca en idioma 'en'", sound=False)
self.__get_by_id(kwargs.get('tvdb_id', ''), "en")
self.lang = "en"
elif self.search_name:
# Search by text
self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', ''))
if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"):
from platformcode import platformtools
platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG,
"Se busca en idioma 'en'")
self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', ''), "en")
self.lang = "en"
if not self.result:
# The search returned no results
if kwargs.get('tvdb_id', ''):
buscando = kwargs.get('tvdb_id', '')
else:
buscando = kwargs.get('search', '')
msg = "The search for %s returned no results." % buscando
logger.debug(msg)
@classmethod
def __check_token(cls):
# logger.info()
if TOKEN == "":
cls.__login()
else:
# if the stored date does not match the current date, call refresh_token, since the token expires after 24 hours
from time import gmtime, strftime
current_date = strftime("%Y-%m-%d", gmtime())
if config.get_setting("tvdb_token_date", "") != current_date:
# if the token was renewed, store the new date
if cls.__refresh_token():
config.set_setting("tvdb_token_date", current_date)
@staticmethod
def __login():
# logger.info()
global TOKEN
apikey = "106B699FDC04301C"
url = HOST + "/login"
params = {"apikey": apikey}
try:
req = urllib2.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
# logger.debug("dict_html %s" % dict_html)
if "token" in dict_html:
token = dict_html["token"]
DEFAULT_HEADERS["Authorization"] = "Bearer " + token
TOKEN = config.set_setting("tvdb_token", token)
@classmethod
def __refresh_token(cls):
# logger.info()
global TOKEN
is_success = False
url = HOST + "/refresh_token"
try:
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except urllib2.HTTPError as err:
logger.error("err.code is %s" % err.code)
# a 401 error means the token has expired, so we need to call login again
if err.code == 401:
cls.__login()
else:
raise
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
# logger.error("tokencito %s" % dict_html)
if "token" in dict_html:
token = dict_html["token"]
DEFAULT_HEADERS["Authorization"] = "Bearer " + token
TOKEN = config.set_setting("tvdb_token", token)
is_success = True
return is_success
def get_info_episode(self, _id, season=1, episode=1, lang=DEFAULT_LANG, id_episode=None):
"""
Returns the data for an episode.
@param _id: series identifier
@type _id: str
@param season: season number [default = 1]
@type season: int
@param episode: episode number [default = 1]
@type episode: int
@param lang: language code to search with
@type lang: str
@param id_episode: episode identifier.
@type id_episode: int
@rtype: dict
@return:
"data": {
"id": 0,
"airedSeason": 0,
"airedEpisodeNumber": 0,
"episodeName": "string",
"firstAired": "string",
"guestStars": [
"string"
],
"director": "string", # deprecated
"directors": [
"string"
],
"writers": [
"string"
],
"overview": "string",
"productionCode": "string",
"showUrl": "string",
"lastUpdated": 0,
"dvdDiscid": "string",
"dvdSeason": 0,
"dvdEpisodeNumber": 0,
"dvdChapter": 0,
"absoluteNumber": 0,
"filename": "string",
"seriesId": "string",
"lastUpdatedBy": "string",
"airsAfterSeason": 0,
"airsBeforeSeason": 0,
"airsBeforeEpisode": 0,
"thumbAuthor": 0,
"thumbAdded": "string",
"thumbWidth": "string",
"thumbHeight": "string",
"imdbId": "string",
"siteRating": 0,
"siteRatingCount": 0
},
"errors": {
"invalidFilters": [
"string"
],
"invalidLanguage": "string",
"invalidQueryParams": [
"string"
]
}
"""
logger.info()
if id_episode and self.episodes.get(id_episode):
return self.episodes.get(id_episode)
params = {"airedSeason": "%s" % season, "airedEpisode": "%s" % episode}
try:
import urllib
params = urllib.urlencode(params)
url = HOST + "/series/%s/episodes/query?%s" % (_id, params)
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
if "data" in dict_html and "id" in dict_html["data"][0]:
self.get_episode_by_id(dict_html["data"][0]["id"], lang)
return dict_html["data"]
def get_list_episodes(self, _id, page=1):
"""
Returns the list of episodes for a series.
@param _id: series identifier
@type _id: str
@param page: page number to fetch [default = 1]
@type page: int
@rtype: dict
@return:
{
"links": {
"first": 0,
"last": 0,
"next": 0,
"previous": 0
},
"data": [
{
"absoluteNumber": 0,
"airedEpisodeNumber": 0,
"airedSeason": 0,
"dvdEpisodeNumber": 0,
"dvdSeason": 0,
"episodeName": "string",
"id": 0,
"overview": "string",
"firstAired": "string",
"lastUpdated": 0
}
],
"errors": {
"invalidFilters": [
"string"
],
"invalidLanguage": "string",
"invalidQueryParams": [
"string"
]
}
}
"""
logger.info()
try:
url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
self.list_episodes[page] = jsontools.load(html)
# logger.info("dict_html %s" % self.list_episodes)
return self.list_episodes[page]
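# Minimal paging sketch (illustrative; mirrors the while-loop used earlier in
# this module; `tvdb` is a hypothetical Tvdb instance with a series loaded):
#
#     page = 1
#     while True:
#         chunk = tvdb.get_list_episodes(tvdb.get_id(), page)
#         if not chunk:
#             break  # request failed, get_list_episodes returned None
#         for ep in chunk.get('data', []):
#             logger.debug("%s %s %s" % (ep['airedSeason'],
#                                        ep['airedEpisodeNumber'],
#                                        ep['episodeName']))
#         nxt = chunk['links']['next']
#         if not isinstance(nxt, int):
#             break  # no more pages
#         page = nxt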
def get_episode_by_id(self, _id, lang=DEFAULT_LANG, semaforo=None):
"""
Gets the data for an episode
@param _id: episode identifier
@type _id: str
@param lang: language code
@type lang: str
@param semaforo: semaphore for multithreading
@type semaforo: threading.Semaphore
@rtype: dict
@return:
{
"data": {
"id": 0,
"airedSeason": 0,
"airedEpisodeNumber": 0,
"episodeName": "string",
"firstAired": "string",
"guestStars": [
"string"
],
"director": "string",
"directors": [
"string"
],
"writers": [
"string"
],
"overview": "string",
"productionCode": "string",
"showUrl": "string",
"lastUpdated": 0,
"dvdDiscid": "string",
"dvdSeason": 0,
"dvdEpisodeNumber": 0,
"dvdChapter": 0,
"absoluteNumber": 0,
"filename": "string",
"seriesId": "string",
"lastUpdatedBy": "string",
"airsAfterSeason": 0,
"airsBeforeSeason": 0,
"airsBeforeEpisode": 0,
"thumbAuthor": 0,
"thumbAdded": "string",
"thumbWidth": "string",
"thumbHeight": "string",
"imdbId": "string",
"siteRating": 0,
"siteRatingCount": 0
},
"errors": {
"invalidFilters": [
"string"
],
"invalidLanguage": "string",
"invalidQueryParams": [
"string"
]
}
}
"""
if semaforo:
semaforo.acquire()
logger.info()
url = HOST + "/episodes/%s" % _id
try:
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
if isinstance(ex, urllib2.HTTPError):
logger.debug("code is %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
dict_html = dict_html.pop("data")
logger.info("dict_html %s" % dict_html)
self.episodes[_id] = dict_html
if semaforo:
semaforo.release()
def __search(self, name, imdb_id, zap2it_id, lang=DEFAULT_LANG):
"""
Searches for a series using a set of parameters.
@param name: name to search for
@type name: str
@param imdb_id: imdb identifier
@type imdb_id: str
@param zap2it_id: zap2it identifier
@type zap2it_id: str
@param lang: language code
@type lang: str
data:{
"aliases": [
"string"
],
"banner": "string",
"firstAired": "string",
"id": 0,
"network": "string",
"overview": "string",
"seriesName": "string",
"status": "string"
}
"""
logger.info()
try:
params = {}
if name:
params["name"] = name
elif imdb_id:
params["imdbId"] = imdb_id
elif zap2it_id:
params["zap2itId"] = zap2it_id
import urllib
params = urllib.urlencode(params)
DEFAULT_HEADERS["Accept-Language"] = lang
url = HOST + "/search/series?%s" % params
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
if isinstance(ex, urllib2.HTTPError):
logger.debug("code is %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]:
# no information available in the default language
return
else:
resultado = dict_html["data"]
# TODO: review result selection; for now always take the first result
index = 0
logger.debug("resultado %s" % resultado)
self.list_results = resultado
self.result = resultado[index]
def __get_by_id(self, _id, lang=DEFAULT_LANG, from_get_list=False):
"""
Gets the data for a series by identifier.
@param _id: series identifier
@type _id: str
@param lang: language code
@type lang: str
@rtype: dict
@return:
{
"data": {
"id": 0,
"seriesName": "string",
"aliases": [
"string"
],
"banner": "string",
"seriesId": 0,
"status": "string",
"firstAired": "string",
"network": "string",
"networkId": "string",
"runtime": "string",
"genre": [
"string"
],
"overview": "string",
"lastUpdated": 0,
"airsDayOfWeek": "string",
"airsTime": "string",
"rating": "string",
"imdbId": "string",
"zap2itId": "string",
"added": "string",
"siteRating": 0,
"siteRatingCount": 0
},
"errors": {
"invalidFilters": [
"string"
],
"invalidLanguage": "string",
"invalidQueryParams": [
"string"
]
}
}
"""
logger.info()
resultado = {}
url = HOST + "/series/%s" % _id
try:
DEFAULT_HEADERS["Accept-Language"] = lang
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
if isinstance(ex, urllib2.HTTPError):
logger.debug("code is %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
else:
dict_html = jsontools.load(html)
if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]:
return {}
else:
resultado1 = dict_html["data"]
if not resultado1 and from_get_list:
return self.__get_by_id(_id, "en")
logger.debug("resultado %s" % dict_html)
resultado2 = {"image_poster": [{'keyType': 'poster', 'fileName': 'posters/%s-1.jpg' % _id}]}
resultado3 = {"image_fanart": [{'keyType': 'fanart', 'fileName': 'fanart/original/%s-1.jpg' % _id}]}
resultado = resultado1.copy()
resultado.update(resultado2)
resultado.update(resultado3)
logger.debug("resultado total %s" % resultado)
self.list_results = [resultado]
self.result = resultado
return resultado
def get_images(self, _id, image="poster", season=1, lang="en"):
"""
Gets one type of image for a series in a given language.
@param _id: series identifier
@type _id: str
@param image: search key, ["poster" (default), "fanart", "season"]
@type image: str
@param season: season number
@type season: int
@param lang: language code to search for
@type lang: str
@return: dictionary with the chosen type of images.
@rtype: dict
"""
logger.info()
if self.result.get('image_season_%s' % season):
return self.result['image_season_%s' % season]
params = {}
if image == "poster":
params["keyType"] = "poster"
elif image == "fanart":
params["keyType"] = "fanart"
params["subKey"] = "graphical"
elif image == "season":
params["keyType"] = "season"
params["subKey"] = "%s" % season
image += "_%s" % season
try:
import urllib
params = urllib.urlencode(params)
DEFAULT_HEADERS["Accept-Language"] = lang
url = HOST + "/series/%s/images/query?%s" % (_id, params)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error in: %s" % message)
return {}
else:
dict_html = jsontools.load(html)
dict_html["image_" + image] = dict_html.pop("data")
self.result.update(dict_html)
return dict_html
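# Example call (illustrative; `tvdb` is a hypothetical instance with a series
# already loaded). For image="season" the result key embeds the season number:
#
#     images = tvdb.get_images(tvdb.get_id(), image="season", season=2)
#     if images and 'image_season_2' in images:
#         thumb = HOST_IMAGE + images['image_season_2'][0]['fileName']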
def get_tvshow_cast(self, _id, lang=DEFAULT_LANG):
"""
Gets the cast of a series
@param _id: series identifier
@type _id: str
@param lang: language code to search with
@type lang: str
@return: dictionary with the actors
@rtype: dict
"""
logger.info()
url = HOST + "/series/%s/actors" % _id
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
html = response.read()
response.close()
dict_html = jsontools.load(html)
dict_html["cast"] = dict_html.pop("data")
self.result.update(dict_html)
def get_id(self):
"""
@return: Returns the Tvdb identifier of the loaded series, or an empty string if nothing
is loaded. This method can be used to tell whether a search returned a result or not.
@rtype: str
"""
return str(self.result.get('id', ""))
def get_list_results(self):
"""
Returns the results found for a series.
@rtype: list
@return: list of results
"""
logger.info()
list_results = []
# TODO: review this condition
# if we have a single result and it has seriesName, we already have the series info and do not need to search again
if len(self.list_results) == 1 and "seriesName" in self.result:
list_results.append(self.result)
info_load = True
else:
import threading
semaforo = threading.Semaphore(20)
l_hilo = list()
r_list = list()
def sub_thread(_id, i):
semaforo.acquire()
ret = self.__get_by_id(_id, DEFAULT_LANG, True)
semaforo.release()
r_list.append((ret, i))
for index, e in enumerate(self.list_results):
t = threading.Thread(target=sub_thread, args=(e["id"], index))
t.start()
l_hilo.append(t)
for x in l_hilo:
x.join()
r_list.sort(key=lambda i: i[1])
list_results = [ii[0] for ii in r_list]
info_load = False
return list_results, info_load
def get_infoLabels(self, infoLabels=None, origen=None):
"""
@param infoLabels: Extra information about the movie, series, season or episode.
@type infoLabels: dict
@param origen: Source dictionary the infoLabels are obtained from; by default self.result
@type origen: dict
@return: Returns the extra information obtained from the current object. If the infoLabels
parameter was passed in, the returned value is that parameter, duly updated.
@rtype: dict
"""
# TODO: review
if infoLabels:
# logger.debug("is an infoLabels instance")
ret_infoLabels = InfoLabels(infoLabels)
else:
# logger.debug("NOT an infoLabels instance")
ret_infoLabels = InfoLabels()
# fix
ret_infoLabels['mediatype'] = 'tvshow'
# Initialize lists
l_castandrole = ret_infoLabels.get('castandrole', [])
# logger.debug("self.result %s" % self.result)
if not origen:
origen = self.result
# TODO: review
# if 'credits' in origen.keys():
# dic_origen_credits = origen['credits']
# origen['credits_cast'] = dic_origen_credits.get('cast', [])
# origen['credits_crew'] = dic_origen_credits.get('crew', [])
# del origen['credits']
items = origen.items()
for k, v in items:
if not v:
continue
if k == 'overview':
ret_infoLabels['plot'] = v
elif k == 'runtime':
ret_infoLabels['duration'] = int(v) * 60
elif k == 'firstAired':
ret_infoLabels['year'] = int(v[:4])
ret_infoLabels['premiered'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0]
# TODO: review
# elif k == 'original_title' or k == 'original_name':
# ret_infoLabels['originaltitle'] = v
elif k == 'siteRating':
ret_infoLabels['rating'] = float(v)
elif k == 'siteRatingCount':
ret_infoLabels['votes'] = v
elif k == 'status':
# translate the status values of a series
ret_infoLabels['status'] = DICT_STATUS.get(v, v)
# not a fan of storing the network string as studio, but this is how the generic scraper does it
elif k == 'network':
ret_infoLabels['studio'] = v
elif k == 'image_poster':
# take the first image in the list
ret_infoLabels['thumbnail'] = HOST_IMAGE + v[0]['fileName']
elif k == 'image_fanart':
# take the first image in the list
ret_infoLabels['fanart'] = HOST_IMAGE + v[0]['fileName']
# # the background image is not available
# elif k == 'banner':
# ret_infoLabels['fanart'] = HOST_IMAGE + v
elif k == 'id':
ret_infoLabels['tvdb_id'] = v
elif k == 'imdbId':
ret_infoLabels['imdb_id'] = v
# not displayed
# ret_infoLabels['code'] = v
elif k in "rating":
# traducimos la clasificación por edades (content rating system)
ret_infoLabels['mpaa'] = DICT_MPAA.get(v, v)
elif k in "genre":
genre_list = ""
for index, i in enumerate(v):
if index > 0:
genre_list += ", "
# translate the genres
genre_list += DICT_GENRE.get(i, i)
ret_infoLabels['genre'] = genre_list
elif k == 'seriesName': # or k == 'name' or k == 'title':
# if len(origen.get('aliases', [])) > 0:
# ret_infoLabels['title'] = v + " " + origen.get('aliases', [''])[0]
# else:
# ret_infoLabels['title'] = v
# logger.info("el titulo es %s " % ret_infoLabels['title'])
ret_infoLabels['title'] = v
elif k == 'cast':
dic_aux = dict((name, character) for (name, character) in l_castandrole)
l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in dic_aux.keys()])
else:
logger.debug("Attributes not added: %s=%s" % (k, v))
# Sort the lists and convert them to str if necessary
if l_castandrole:
ret_infoLabels['castandrole'] = l_castandrole
logger.debug("ret_infoLabels %s" % ret_infoLabels)
return ret_infoLabels
| gpl-3.0 |
scroggo/skia | platform_tools/nacl/httpd.py | 116 | 7009 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tiny web server.
This is intended to be used for testing, and only run from within the examples
directory.
"""
import BaseHTTPServer
import logging
import optparse
import os
import SimpleHTTPServer
import SocketServer
import sys
import urlparse
EXAMPLE_PATH=os.path.dirname(os.path.abspath(__file__))
NACL_SDK_ROOT = os.getenv('NACL_SDK_ROOT', os.path.dirname(EXAMPLE_PATH))
if os.path.exists(NACL_SDK_ROOT):
sys.path.append(os.path.join(NACL_SDK_ROOT, 'tools'))
import decode_dump
import getos
else:
NACL_SDK_ROOT=None
last_nexe = None
last_nmf = None
logging.getLogger().setLevel(logging.INFO)
# Using 'localhost' means that we only accept connections
# via the loop back interface.
SERVER_PORT = 5103
SERVER_HOST = ''
# We only run from the examples directory so that not too much is exposed
# via this HTTP server. Everything in the directory is served, so there should
# never be anything potentially sensitive in the serving directory, especially
# if the machine might be a multi-user machine and not all users are trusted.
# We only serve via the loopback interface.
def SanityCheckDirectory():
httpd_path = os.path.abspath(os.path.dirname(__file__))
serve_path = os.path.abspath(os.getcwd())
# Verify we are serving from the directory this script came from, or below
if serve_path[:len(httpd_path)] == httpd_path:
return
logging.error('For security, httpd.py should only be run from within the')
logging.error('example directory tree.')
logging.error('We are currently in %s.' % serve_path)
sys.exit(1)
# An HTTP server that will quit when |is_running| is set to False. We also use
# SocketServer.ThreadingMixIn in order to handle requests asynchronously for
# faster responses.
class QuittableHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
def serve_forever(self, timeout=0.5):
self.is_running = True
self.timeout = timeout
while self.is_running:
self.handle_request()
def shutdown(self):
self.is_running = False
return 1
# "Safely" split a string at |sep| into a [key, value] pair. If |sep| does not
# exist in |str|, then the entire |str| is the key and the value is set to an
# empty string.
def KeyValuePair(str, sep='='):
if sep in str:
return str.split(sep)
else:
return [str, '']
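# Example (illustrative, not part of the original file):
#
#     dict(KeyValuePair(kv) for kv in 'quit=1&verbose'.split('&'))
#     # => {'quit': '1', 'verbose': ''}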
# A small handler that looks for '?quit=1' query in the path and shuts itself
# down if it finds that parameter.
class QuittableHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.send_header('Cache-Control','no-cache, must-revalidate')
self.send_header('Expires','-1')
self.end_headers()
return f
def do_GET(self):
global last_nexe, last_nmf
(_, _, path, query, _) = urlparse.urlsplit(self.path)
url_params = dict([KeyValuePair(key_value)
for key_value in query.split('&')])
if 'quit' in url_params and '1' in url_params['quit']:
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', '0')
self.end_headers()
self.server.shutdown()
return
if path.endswith('.nexe'):
last_nexe = path
if path.endswith('.nmf'):
last_nmf = path
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
(_, _, path, query, _) = urlparse.urlsplit(self.path)
if 'Content-Length' in self.headers:
if not NACL_SDK_ROOT:
self.wfile.write('Could not find NACL_SDK_ROOT to decode trace.')
return
data = self.rfile.read(int(self.headers['Content-Length']))
nexe = '.' + last_nexe
nmf = '.' + last_nmf
addr = os.path.join(NACL_SDK_ROOT, 'toolchain',
getos.GetPlatform() + '_x86_newlib',
'bin', 'x86_64-nacl-addr2line')
decoder = decode_dump.CoreDecoder(nexe, nmf, addr, None, None)
info = decoder.Decode(data)
trace = decoder.StackTrace(info)
decoder.PrintTrace(trace, sys.stdout)
decoder.PrintTrace(trace, self.wfile)
def Run(server_address,
server_class=QuittableHTTPServer,
handler_class=QuittableHTTPHandler):
httpd = server_class(server_address, handler_class)
logging.info("Starting local server on port %d", server_address[1])
logging.info("To shut down send http://localhost:%d?quit=1",
server_address[1])
try:
httpd.serve_forever()
except KeyboardInterrupt:
logging.info("Received keyboard interrupt.")
httpd.server_close()
logging.info("Shutting down local server on port %d", server_address[1])
def main():
usage_str = "usage: %prog [options] [optional_portnum]"
parser = optparse.OptionParser(usage=usage_str)
parser.add_option(
'--no_dir_check', dest='do_safe_check',
action='store_false', default=True,
help='Do not ensure that httpd.py is being run from a safe directory.')
(options, args) = parser.parse_args(sys.argv)
if options.do_safe_check:
SanityCheckDirectory()
if len(args) > 2:
print 'Too many arguments specified.'
parser.print_help()
elif len(args) == 2:
Run((SERVER_HOST, int(args[1])))
else:
Run((SERVER_HOST, SERVER_PORT))
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
moonboots/tensorflow | tensorflow/python/client/notebook.py | 26 | 4596 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adapted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
| apache-2.0 |
Erotemic/ibeis | ibeis/algo/hots/__init__.py | 1 | 3797 | # -*- coding: utf-8 -*-
# Autogenerated on 12:39:11 2016/10/13
# flake8: noqa
from __future__ import absolute_import, division, print_function, unicode_literals
from ibeis.algo.hots import _pipeline_helpers
from ibeis.algo.hots import chip_match
from ibeis.algo.hots import exceptions
from ibeis.algo.hots import hstypes
from ibeis.algo.hots import match_chips4
from ibeis.algo.hots import name_scoring
from ibeis.algo.hots import neighbor_index
from ibeis.algo.hots import neighbor_index_cache
from ibeis.algo.hots import nn_weights
from ibeis.algo.hots import old_chip_match
from ibeis.algo.hots import pipeline
from ibeis.algo.hots import query_params
from ibeis.algo.hots import query_request
from ibeis.algo.hots import scoring
import utool
print, rrr, profile = utool.inject2(__name__, '[ibeis.algo.hots]')
def reassign_submodule_attributes(verbose=True):
"""
why reloading all the modules doesn't do this I don't know
"""
import sys
if verbose and '--quiet' not in sys.argv:
print('dev reimport')
# Self import
import ibeis.algo.hots
# Implicit reassignment.
seen_ = set([])
for tup in IMPORT_TUPLES:
if len(tup) > 2 and tup[2]:
continue # dont import package names
submodname, fromimports = tup[0:2]
submod = getattr(ibeis.algo.hots, submodname)
for attr in dir(submod):
if attr.startswith('_'):
continue
if attr in seen_:
# This just holds off bad behavior
# but it does mimic normal util_import behavior
# which is good
continue
seen_.add(attr)
setattr(ibeis.algo.hots, attr, getattr(submod, attr))
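# Why the reassignment above is needed (illustrative; attribute names are
# hypothetical, and rrr is assumed to be the utool-injected reload helper):
#
#     import ibeis.algo.hots as hots
#     old_fn = hots.some_attr        # attr re-exported from a submodule
#     hots.pipeline.rrr()            # reload the submodule in place
#     hots.some_attr is old_fn       # still True until
#                                    # reassign_submodule_attributes() runs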
def reload_subs(verbose=True):
""" Reloads ibeis.algo.hots and submodules """
if verbose:
print('Reloading submodules')
rrr(verbose=verbose)
def wrap_fbrrr(mod):
def fbrrr(*args, **kwargs):
""" fallback reload """
if verbose:
print('No fallback reload for mod=%r' % (mod,))
# Breaks ut.Pref (which should be deprecated anyway)
# import imp
# imp.reload(mod)
return fbrrr
def get_rrr(mod):
if hasattr(mod, 'rrr'):
return mod.rrr
else:
return wrap_fbrrr(mod)
def get_reload_subs(mod):
return getattr(mod, 'reload_subs', wrap_fbrrr(mod))
get_rrr(_pipeline_helpers)(verbose=verbose)
get_rrr(chip_match)(verbose=verbose)
get_rrr(exceptions)(verbose=verbose)
get_rrr(hstypes)(verbose=verbose)
get_rrr(match_chips4)(verbose=verbose)
get_rrr(name_scoring)(verbose=verbose)
get_rrr(neighbor_index)(verbose=verbose)
get_rrr(neighbor_index_cache)(verbose=verbose)
get_rrr(nn_weights)(verbose=verbose)
get_rrr(old_chip_match)(verbose=verbose)
get_rrr(pipeline)(verbose=verbose)
get_rrr(query_params)(verbose=verbose)
get_rrr(query_request)(verbose=verbose)
get_rrr(scoring)(verbose=verbose)
rrr(verbose=verbose)
try:
# hackish way of propagating up the new reloaded submodule attributes
reassign_submodule_attributes(verbose=verbose)
except Exception as ex:
print(ex)
rrrr = reload_subs
IMPORT_TUPLES = [
('_pipeline_helpers', None),
('chip_match', None),
('exceptions', None),
('hstypes', None),
('match_chips4', None),
('name_scoring', None),
('neighbor_index', None),
('neighbor_index_cache', None),
('nn_weights', None),
('old_chip_match', None),
('pipeline', None),
('query_params', None),
('query_request', None),
('scoring', None),
]
"""
Regen Command:
cd /home/joncrall/code/ibeis/ibeis/algo/hots
makeinit.py --modname=ibeis.algo.hots
"""
| apache-2.0 |
pyfisch/servo | tests/wpt/web-platform-tests/tools/third_party/py/testing/code/test_assertion.py | 55 | 7796 | import pytest, py
import re
def exvalue():
import sys
return sys.exc_info()[1]
def f():
return 2
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_within_finally():
excinfo = py.test.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert re.search("ZeroDivisionError:.*division", s) is not None
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in str(e)
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert str(e).find('WeirdRepr') != -1
assert str(e).find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert str(e).find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert str(e).find('assert [1, 2, 3] !=') != -1
@py.test.mark.xfail(py.test.__version__[0] != "2",
reason="broken on modern pytest",
run=False
)
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
py.test.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in str(e)
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in str(e)
class TestView:
def setup_class(cls):
cls.View = py.test.importorskip("py._code._assertionold").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
def test_underscore_api():
py.code._AssertionError
py.code._reinterpret_old # used by pypy
py.code._reinterpret
def test_assert_customizable_reprcompare(monkeypatch):
util = pytest.importorskip("_pytest.assertion.util")
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@py.test.mark.xfail(py.test.__version__[0] != "2",
reason="broken on modern pytest",
run=False)
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError as e:
s = str(e)
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
| mpl-2.0 |
joergdietrich/astropy | astropy/table/table.py | 2 | 101482 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip, range
from .index import TableIndices, TableLoc, TableILoc
import re
import sys
from collections import OrderedDict, Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.compat.numpy import broadcast_to as np_broadcast_to
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
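# Illustrative example (not in the original source; dtype('int64') assumes a
# 64-bit platform):
#
#     >>> descr(Column(name='a', data=[[1, 2], [3, 4]]))
#     ('a', dtype('int64'), (2,))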
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super(TableColumns, self).__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, six.string_types):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super(TableColumns, self).__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in six.iterkeys(self))
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(six.itervalues(self))
self.clear()
self.update(list(zip(new_names, cols)))
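# Illustrative behavior (hypothetical session; _rename_column is internal and
# normally called via Table.rename_column):
#
#     >>> tc = TableColumns([Column(name='a'), Column(name='b')])
#     >>> tc._rename_column('a', 'x')
#     >>> tc.keys()          # column order is preserved
#     ['x', 'b']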
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
class Table(object):
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names
dtype : list, optional
Specify column data types
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data (default=True).
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument
copy_indices : bool, optional
Copy any indices in the input data (default=True)
**kwargs : dict, optional
Additional keyword args when converting table-like object
.. note::
If the input is a Table the ``meta`` is always copied regardless of the
``copy`` parameter.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
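# Example (illustrative; passing explicit names avoids any dict-ordering
# ambiguity on older Pythons):
#
#     >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]}, names=('a', 'b'))
#     >>> t.as_array().dtype.names
#     ('a', 'b')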
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support Unicode column names on Python 2, or
# bytes column names on Python 3, so fix them up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in six.itervalues(self.columns)]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, and FastRBT. If the supplied argument is None (by
default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
if isinstance(colnames, six.string_types):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
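# Typical use (illustrative; `t` is a hypothetical Table with an index on 'a'):
#
#     with t.index_mode('freeze'):
#         t['a'][:] = t['a'] + 1  # bulk edits; indices refresh on exit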
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin
if isinstance(col, np.ndarray) and len(col.dtype) > 1:
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
if six.PY2 and isinstance(out, six.text_type):
out = out.encode('utf-8')
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __unicode__(self):
return '\n'.join(self.pformat())
if not six.PY2:
__str__ = __unicode__
def __bytes__(self):
return six.text_type(self).encode('utf-8')
if six.PY2:
__str__ = __bytes__
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses)
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not a Quantity (which gets converted to a Column
# with the unit set)?
return has_info_class(col, MixinInfo) and not isinstance(col, Quantity)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes (default=False)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
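# A minimal usage sketch (``t`` is a hypothetical table): a negative
# max_lines disables the line limit and show_unit forces the unit row:
#
#     >>> t.pprint(max_lines=-1, show_unit=True)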
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Default
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
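# A minimal usage sketch (requires IPython/Jupyter and, per the Notes
# above, online access to the DataTables javascript):
#
#     >>> t.show_in_notebook(display_length=20, show_row_index='idx')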
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in http://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from ..extern.six.moves.urllib.parse import urljoin
from ..extern.six.moves.urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
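# A minimal usage sketch: a sortable, searchable in-browser view served
# from local javascript copies (the ``jskwargs`` default above):
#
#     >>> t.show_in_browser(jsviewer=True, max_lines=1000)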
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes (default=False)
html : bool
Format the output as an HTML table (default=False)
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
none
Returns
-------
lines : list
Formatted table as a list of strings
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
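# A minimal usage sketch: pformat() returns the lines instead of
# printing them, which suits logging; ``log`` here is hypothetical:
#
#     >>> for line in t.pformat(max_width=-1):
#     ...     log.info(line)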
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes (default=False)
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, six.string_types):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif (isinstance(item, (tuple, list)) and item and
all(isinstance(x, six.string_types) for x in item)):
bad_names = [x for x in item if x not in self.colnames]
if bad_names:
raise ValueError('Slice name(s) {0} not valid column name(s)'
.format(', '.join(bad_names)))
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, six.string_types) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin
if isinstance(value, np.ndarray) and len(value.dtype) > 1:
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
# has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np_broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np_broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, six.string_types):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
# Set the corresponding row assuming value is an iterable.
if not hasattr(value, '__len__'):
raise TypeError('Right side value must be iterable')
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
for col, val in zip(self.columns.values(), value):
col[item] = val
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, six.string_types):
self.remove_column(item)
elif isinstance(item, tuple):
self.remove_columns(item)
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, rename_duplicate=False):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default)
rename_duplicate : bool
Uniquify column name if it already exists (default=False)
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
self.add_columns([col], [index], rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default)
copy : bool
Make a copy of the new columns (default=True)
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones
(default=False)
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
while col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (six.integer_types, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=np.bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using remove_column.
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, python3_only):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
python3_only : bool
Only do this operation for Python 3
"""
if python3_only and six.PY2:
return
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=False):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character in the string
with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows Python
3 scripts to manipulate string arrays with natural syntax.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t = Table.read('my_data.fits')
>>> t.convert_bytestring_to_unicode(python3_only=True)
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('S', 'U', python3_only)
def convert_unicode_to_bytestring(self, python3_only=False):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file in Python 3, it may be desirable
to encode unicode columns as bytestrings. This routine takes advantage of
numpy's automatic conversion, which works for strings that are pure ASCII.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t.convert_unicode_to_bytestring(python3_only=True)
>>> t.write('my_data.fits')
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('U', 'S', python3_only)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, six.string_types):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
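# A minimal usage sketch: argsort() does not mutate the table, so a
# sorted copy can be taken by fancy indexing (``t`` is hypothetical):
#
#     >>> order = t.argsort(['name', 'firstname'])
#     >>> t_sorted = t[order]    # new table; ``t`` is unchanged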
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, six.string_types):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`).
"""
return io_registry.read(cls, *args, **kwargs)
def write(self, *args, **kwargs):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.write`).
"""
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array
.. note::
The ``meta`` is always deepcopied regardless of the value for
``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() < {0}".
format(str(type(other))))
else:
return super(Table, self).__lt__(other)
def __gt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() > {0}".
format(str(type(other))))
else:
return super(Table, self).__gt__(other)
def __le__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() <= {0}".
format(str(type(other))))
else:
return super(Table, self).__le__(other)
def __ge__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() >= {0}".
format(str(type(other))))
else:
return super(Table, self).__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
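# A hedged sketch of the comparison semantics above: equality is
# element-wise, one boolean per row (t1, t2 are hypothetical Tables of
# the same length and dtype):
#
#     >>> matches = (t1 == t2)     # boolean array, one entry per row
#     >>> bool(matches.all())      # True only if every row agrees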
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
"""
if self.has_mixin_columns:
raise NotImplementedError('group_by not available for tables with mixin columns')
return groups.table_group_by(self, keys)
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(np.object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
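# A minimal usage sketch: per the kind-based handling above, masked
# integer columns come back as float with NaN at masked entries:
#
#     >>> df = t.to_pandas()          # ``t`` is hypothetical
#     >>> df['a'].isnull().sum()      # count of originally-masked values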
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = six.string_types
if not six.PY2:
string_types += (bytes,)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
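# A minimal round-trip sketch: pandas nulls become masked entries
# (``pd`` is the usual pandas import alias):
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'a': [1.0, None, 3.0]})
#     >>> t = Table.from_pandas(df)    # t['a'] is a MaskedColumn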
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names
dtype : list, optional
Specify column data types
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data (default=True).
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument
copy_indices : bool, optional
Copy any indices in the input data (default=True)
**kwargs : dict, optional
Additional keyword args when converting table-like object
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super(QTable, self)._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if six.callable(super(NdarrayMixin, self).__array_finalize__):
super(NdarrayMixin, self).__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super(NdarrayMixin, self).__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super(NdarrayMixin, self).__setstate__(nd_state)
self.__dict__.update(own_state)
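# Illustrative sketch (not part of the original source): the __reduce__ /
# __setstate__ pair above lets pickle carry the instance __dict__ (and thus
# the ``info`` attribute) through a round trip,
#
#     import pickle
#     col = NdarrayMixin(np.arange(3))
#     col2 = pickle.loads(pickle.dumps(col))   # info survives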
| bsd-3-clause |
NeCTAR-RC/python-neutronclient | neutronclient/tests/functional/test_readonly_neutron.py | 4 | 8539 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unittest2
from tempest_lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
"""This is a first pass at a simple read only python-neutronclient test.
This only exercises client commands that are read only.
This should test commands:
* as a regular user
    * as an admin user
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-neutron-exist')
# NOTE(mestery): Commands in order listed in 'neutron help'
# Optional arguments:
def test_neutron_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
def test_neutron_net_list(self):
net_list = self.parser.listing(self.neutron('net-list'))
self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
def test_neutron_ext_list(self):
ext = self.parser.listing(self.neutron('ext-list'))
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
self.neutron('dhcp-agent-list-hosting-net',
params='private')
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
self.assertTableStruct(agents, field_names)
def test_neutron_floatingip_list(self):
self.neutron('floatingip-list')
def test_neutron_meter_label_list(self):
self.neutron('meter-label-list')
def test_neutron_meter_label_rule_list(self):
self.neutron('meter-label-rule-list')
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
def test_neutron_lb_healthmonitor_list(self):
self._test_neutron_lbaas_command('lb-healthmonitor-list')
def test_neutron_lb_member_list(self):
self._test_neutron_lbaas_command('lb-member-list')
def test_neutron_lb_pool_list(self):
self._test_neutron_lbaas_command('lb-pool-list')
def test_neutron_lb_vip_list(self):
self._test_neutron_lbaas_command('lb-vip-list')
def test_neutron_net_external_list(self):
net_ext_list = self.parser.listing(self.neutron('net-external-list'))
self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
def test_neutron_port_list(self):
port_list = self.parser.listing(self.neutron('port-list'))
self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
'fixed_ips'])
def test_neutron_quota_list(self):
self.neutron('quota-list')
def test_neutron_router_list(self):
router_list = self.parser.listing(self.neutron('router-list'))
self.assertTableStruct(router_list, ['id', 'name',
'external_gateway_info'])
def test_neutron_security_group_list(self):
security_grp = self.parser.listing(self.neutron('security-group-list'))
self.assertTableStruct(security_grp, ['id', 'name',
'security_group_rules'])
def test_neutron_security_group_rule_list(self):
security_grp = self.parser.listing(self.neutron
('security-group-rule-list'))
self.assertTableStruct(security_grp, ['id', 'security_group',
'direction', 'ethertype',
'protocol/port', 'remote'])
def test_neutron_subnet_list(self):
subnet_list = self.parser.listing(self.neutron('subnet-list'))
self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
'allocation_pools'])
@unittest2.skip("Skipping until 1484148 is resolved")
def test_neutron_vpn_ikepolicy_list(self):
ikepolicy = self.parser.listing(self.neutron('vpn-ikepolicy-list'))
self.assertTableStruct(ikepolicy, ['id', 'name',
'auth_algorithm',
'encryption_algorithm',
'ike_version', 'pfs'])
@unittest2.skip("Skipping until 1484148 is resolved")
def test_neutron_vpn_ipsecpolicy_list(self):
ipsecpolicy = self.parser.listing(self.neutron('vpn-ipsecpolicy-list'))
self.assertTableStruct(ipsecpolicy, ['id', 'name',
'auth_algorithm',
'encryption_algorithm',
'pfs'])
@unittest2.skip("Skipping until 1484148 is resolved")
def test_neutron_vpn_service_list(self):
vpn_list = self.parser.listing(self.neutron('vpn-service-list'))
self.assertTableStruct(vpn_list, ['id', 'name',
'router_id', 'status'])
@unittest2.skip("Skipping until 1484148 is resolved")
def test_neutron_ipsec_site_connection_list(self):
ipsec_site = self.parser.listing(self.neutron
('ipsec-site-connection-list'))
self.assertTableStruct(ipsec_site, ['id', 'name',
'peer_address',
'peer_cidrs',
'route_mode',
'auth_mode', 'status'])
def test_neutron_firewall_list(self):
firewall_list = self.parser.listing(self.neutron
('firewall-list'))
self.assertTableStruct(firewall_list, ['id', 'name',
'firewall_policy_id'])
def test_neutron_firewall_policy_list(self):
firewall_policy = self.parser.listing(self.neutron
('firewall-policy-list'))
self.assertTableStruct(firewall_policy, ['id', 'name',
'firewall_rules'])
def test_neutron_firewall_rule_list(self):
firewall_rule = self.parser.listing(self.neutron
('firewall-rule-list'))
self.assertTableStruct(firewall_rule, ['id', 'name',
'firewall_policy_id',
'summary', 'enabled'])
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
        command_pattern = re.compile(r'^ {2}([a-z0-9\-_]+)')
for line in lines[cmds_start:]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
'router-show', 'agent-update', 'help'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_neutron_version(self):
self.neutron('', flags='--version')
def test_neutron_debug_net_list(self):
self.neutron('net-list', flags='--debug')
def test_neutron_quiet_net_list(self):
self.neutron('net-list', flags='--quiet')
| apache-2.0 |
ankurankan/pgmpy | pgmpy/inference/bn_inference.py | 2 | 8890 | from pgmpy.inference import Inference
from pgmpy.models import BayesianNetwork
import pandas as pd
import numpy as np
import networkx as nx
import itertools
class BayesianModelInference(Inference):
"""
Inference class specific to Bayesian Models
"""
def __init__(self, model):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
Parameters
----------
model: Bayesian Model
model on which inference queries will be computed
"""
if not isinstance(model, BayesianNetwork):
raise TypeError(
"Model expected type: BayesianNetwork, got type: ", type(model)
)
super(BayesianModelInference, self).__init__(model)
self._initialize_structures()
self.topological_order = list(nx.topological_sort(model))
def pre_compute_reduce(self, variable):
"""
Get probability arrays for a node as function of conditional dependencies
Internal function used for Bayesian networks, eg. in BayesianModelSampling
and BayesianModelProbability.
Parameters
----------
variable: Bayesian Model Node
node of the Bayesian network
Returns
-------
dict: dictionary with probability array for node
as function of conditional dependency values
"""
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
cached_values = {}
for state_combination in itertools.product(
*[range(self.cardinality[var]) for var in variable_evid]
):
states = list(zip(variable_evid, state_combination))
cached_values[state_combination] = variable_cpd.reduce(
states, inplace=False, show_warnings=False
).values
return cached_values
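    # Illustrative sketch (assumption): for a node with two binary parents the
    # returned dict maps each evidence-state tuple to the reduced CPD values,
    # e.g. {(0, 0): array([p0, p1]), (0, 1): array([...]), ...}, so sampling
    # code can look up probabilities without re-reducing the CPD each time.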
def pre_compute_reduce_maps(self, variable):
"""
Get probability array-maps for a node as function of conditional dependencies
Internal function used for Bayesian networks, eg. in BayesianModelSampling
and BayesianModelProbability.
Parameters
----------
variable: Bayesian Model Node
node of the Bayesian network
Returns
-------
dict: dictionary with probability array-index for node as function of conditional dependency values,
dictionary with mapping of probability array-index to probability array.
"""
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
state_combinations = [
tuple(sc)
for sc in itertools.product(
*[range(self.cardinality[var]) for var in variable_evid]
)
]
weights_list = np.array(
[
variable_cpd.reduce(
list(zip(variable_evid, sc)), inplace=False, show_warnings=False
).values
for sc in state_combinations
]
)
unique_weights, weights_indices = np.unique(
weights_list, axis=0, return_inverse=True
)
# convert weights to index; make mapping of state to index
state_to_index = dict(zip(state_combinations, weights_indices))
# make mapping of index to weights
index_to_weight = dict(enumerate(unique_weights))
# return mappings of state to index, and index to weight
return state_to_index, index_to_weight
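    # Illustrative lookup sketch (assumption): given an observed evidence
    # state, the probability array is recovered with two dictionary hops,
    #
    #     state_to_index, index_to_weight = self.pre_compute_reduce_maps(node)
    #     weights = index_to_weight[state_to_index[(0, 1)]]
    #
    # which avoids storing duplicate arrays for evidence states whose reduced
    # CPD slices are identical.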
class BayesianModelProbability(BayesianModelInference):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
"""
def __init__(self, model):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
Parameters
----------
model: Bayesian Model
model on which inference queries will be computed
"""
super(BayesianModelProbability, self).__init__(model)
def _log_probability_node(self, data, ordering, node):
"""
Evaluate the log probability of each datapoint for a specific node.
Internal function used by log_probability().
Parameters
----------
data: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
node: Bayesian Model Node
node from the Bayesian network.
Returns
-------
ndarray: having shape (n_samples,)
The array of log(density) evaluations. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
def vec_translate(a, my_dict):
return np.vectorize(my_dict.__getitem__)(a)
cpd = self.model.get_cpds(node)
# variable to probe: data[n], where n is the node number
current = cpd.variables[0]
current_idx = ordering.index(current)
current_val = data[:, current_idx]
current_no = vec_translate(current_val, cpd.name_to_no[current])
# conditional dependencies E of the probed variable
evidence = cpd.variables[:0:-1]
evidence_idx = [ordering.index(ev) for ev in evidence]
evidence_val = data[:, evidence_idx]
evidence_no = np.empty_like(evidence_val)
for i, ev in enumerate(evidence):
evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev])
if evidence:
# there are conditional dependencies E for data[n] for this node
# Here we retrieve the array: p(x[n]|E). We do this for each x in data.
# We pick the specific node value from the arrays below.
state_to_index, index_to_weight = self.pre_compute_reduce_maps(
variable=node
)
unique, inverse = np.unique(evidence_no, axis=0, return_inverse=True)
weights = np.array(
[index_to_weight[state_to_index[tuple(u)]] for u in unique]
)[inverse]
else:
# there are NO conditional dependencies for this node
# retrieve array: p(x[n]). We do this for each x in data.
# We pick the specific node value from the arrays below.
weights = np.array([cpd.values] * len(data))
# pick the specific node value x[n] from the array p(x[n]|E) or p(x[n])
# We do this for each x in data.
probability_node = np.array([weights[i][cn] for i, cn in enumerate(current_no)])
return np.log(probability_node)
def log_probability(self, data, ordering=None):
"""
Evaluate the logarithmic probability of each point in a data set.
Parameters
----------
data: pandas dataframe OR array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
Returns
-------
ndarray: having shape (n_samples,)
The array of log(density) evaluations. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
if isinstance(data, pd.DataFrame):
# use numpy array from now on.
ordering = data.columns.to_list()
data = data.values
if ordering is None:
ordering = self.topological_order
logp = np.array(
[
self._log_probability_node(data, ordering, node)
for node in self.topological_order
]
)
return np.sum(logp, axis=0)
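    # Illustrative sketch (assumption): with a fitted BayesianNetwork ``model``
    # and a DataFrame ``df`` whose columns are the model variables,
    #
    #     logp = BayesianModelProbability(model).log_probability(df)
    #
    # returns one log-density per row, summed over the per-node factors above.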
def score(self, data, ordering=None):
"""
Compute the total log probability density under the model.
Parameters
----------
data: pandas dataframe OR array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
Returns
-------
float: total log-likelihood of the data in data.
This is normalized to be a probability density, so the value
will be low for high-dimensional data.
"""
return np.sum(self.log_probability(data, ordering))
| mit |
willingc/oh-mainline | vendor/packages/python-social-auth/social/backends/oauth.py | 15 | 16515 | import six
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_TYPE_AUTH_HEADER
from social.p3 import urlencode, unquote
from social.utils import url_add_parameters, parse_qs, handle_http_errors
from social.exceptions import AuthFailed, AuthCanceled, AuthUnknownError, \
AuthMissingParameter, AuthStateMissing, \
AuthStateForbidden, AuthTokenError
from social.backends.base import BaseAuth
class OAuthAuth(BaseAuth):
"""OAuth authentication backend base class.
    Settings are also inspected for additional value names that should be
    stored in the extra_data field. The setting name is built from the current
    backend name (all uppercase) plus _EXTRA_DATA.
access_token is always stored.
URLs settings:
AUTHORIZATION_URL Authorization service url
ACCESS_TOKEN_URL Access token URL
"""
AUTHORIZATION_URL = ''
ACCESS_TOKEN_URL = ''
ACCESS_TOKEN_METHOD = 'GET'
REVOKE_TOKEN_URL = None
REVOKE_TOKEN_METHOD = 'POST'
ID_KEY = 'id'
SCOPE_PARAMETER_NAME = 'scope'
DEFAULT_SCOPE = None
SCOPE_SEPARATOR = ' '
REDIRECT_STATE = False
STATE_PARAMETER = False
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Return access_token and extra defined names to store in
extra_data field"""
data = super(OAuthAuth, self).extra_data(user, uid, response, details,
*args, **kwargs)
data['access_token'] = response.get('access_token', '') or \
kwargs.get('access_token')
return data
def state_token(self):
"""Generate csrf token to include as state parameter."""
return self.strategy.random_string(32)
def get_or_create_state(self):
if self.STATE_PARAMETER or self.REDIRECT_STATE:
# Store state in session for further request validation. The state
# value is passed as state parameter (as specified in OAuth2 spec),
# but also added to redirect, that way we can still verify the
# request if the provider doesn't implement the state parameter.
# Reuse token if any.
name = self.name + '_state'
state = self.strategy.session_get(name)
if state is None:
state = self.state_token()
self.strategy.session_set(name, state)
else:
state = None
return state
def get_session_state(self):
return self.strategy.session_get(self.name + '_state')
def get_request_state(self):
request_state = self.data.get('state') or \
self.data.get('redirect_state')
if request_state and isinstance(request_state, list):
request_state = request_state[0]
return request_state
def validate_state(self):
"""Validate state value. Raises exception on error, returns state
value if valid."""
if not self.STATE_PARAMETER and not self.REDIRECT_STATE:
return None
state = self.get_session_state()
request_state = self.get_request_state()
if not request_state:
raise AuthMissingParameter(self, 'state')
elif not state:
raise AuthStateMissing(self, 'state')
elif not request_state == state:
raise AuthStateForbidden(self)
else:
return state
def get_redirect_uri(self, state=None):
"""Build redirect with redirect_state parameter."""
uri = self.redirect_uri
if self.REDIRECT_STATE and state:
uri = url_add_parameters(uri, {'redirect_state': state})
return uri
def get_scope(self):
"""Return list with needed access scope"""
scope = self.setting('SCOPE', [])
if not self.setting('IGNORE_DEFAULT_SCOPE', False):
scope = scope + (self.DEFAULT_SCOPE or [])
return scope
def get_scope_argument(self):
param = {}
scope = self.get_scope()
if scope:
param[self.SCOPE_PARAMETER_NAME] = self.SCOPE_SEPARATOR.join(scope)
return param
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service. Implement in subclass"""
return {}
def authorization_url(self):
return self.AUTHORIZATION_URL
def access_token_url(self):
return self.ACCESS_TOKEN_URL
def revoke_token_url(self, token, uid):
return self.REVOKE_TOKEN_URL
def revoke_token_params(self, token, uid):
return {}
def revoke_token_headers(self, token, uid):
return {}
def process_revoke_token_response(self, response):
return response.status_code == 200
def revoke_token(self, token, uid):
if self.REVOKE_TOKEN_URL:
url = self.revoke_token_url(token, uid)
params = self.revoke_token_params(token, uid)
headers = self.revoke_token_headers(token, uid)
data = urlencode(params) if self.REVOKE_TOKEN_METHOD != 'GET' \
else None
response = self.request(url, params=params, headers=headers,
data=data, method=self.REVOKE_TOKEN_METHOD)
return self.process_revoke_token_response(response)
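    # Illustrative sketch (assumption, endpoint URL hypothetical): a concrete
    # backend enables revocation just by filling in the constants, e.g.
    #
    #     class ExampleOAuth2(BaseOAuth2):
    #         REVOKE_TOKEN_URL = 'https://provider.example/oauth/revoke'
    #         REVOKE_TOKEN_METHOD = 'POST'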
class BaseOAuth1(OAuthAuth):
"""Consumer based mechanism OAuth authentication, fill the needed
parameters to communicate properly with authentication service.
URLs settings:
REQUEST_TOKEN_URL Request token URL
"""
REQUEST_TOKEN_URL = ''
REQUEST_TOKEN_METHOD = 'GET'
OAUTH_TOKEN_PARAMETER_NAME = 'oauth_token'
REDIRECT_URI_PARAMETER_NAME = 'redirect_uri'
    UNAUTHORIZED_TOKEN_SUFFIX = 'unauthorized_token_name'
def auth_url(self):
"""Return redirect url"""
token = self.set_unauthorized_token()
return self.oauth_authorization_request(token)
def process_error(self, data):
if 'oauth_problem' in data:
if data['oauth_problem'] == 'user_refused':
raise AuthCanceled(self, 'User refused the access')
raise AuthUnknownError(self, 'Error was ' + data['oauth_problem'])
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Return user, might be logged in"""
# Multiple unauthorized tokens are supported (see #521)
self.process_error(self.data)
self.validate_state()
token = self.get_unauthorized_token()
access_token = self.access_token(token)
return self.do_auth(access_token, *args, **kwargs)
@handle_http_errors
def do_auth(self, access_token, *args, **kwargs):
"""Finish the auth process once the access_token was retrieved"""
if not isinstance(access_token, dict):
access_token = parse_qs(access_token)
data = self.user_data(access_token)
if data is not None and 'access_token' not in data:
data['access_token'] = access_token
kwargs.update({'response': data, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def get_unauthorized_token(self):
        name = self.name + self.UNAUTHORIZED_TOKEN_SUFFIX
unauthed_tokens = self.strategy.session_get(name, [])
if not unauthed_tokens:
raise AuthTokenError(self, 'Missing unauthorized token')
data_token = self.data.get(self.OAUTH_TOKEN_PARAMETER_NAME)
if data_token is None:
raise AuthTokenError(self, 'Missing unauthorized token')
token = None
for utoken in unauthed_tokens:
orig_utoken = utoken
if not isinstance(utoken, dict):
utoken = parse_qs(utoken)
if utoken.get(self.OAUTH_TOKEN_PARAMETER_NAME) == data_token:
self.strategy.session_set(name, list(set(unauthed_tokens) -
set([orig_utoken])))
token = utoken
break
else:
raise AuthTokenError(self, 'Incorrect tokens')
return token
def set_unauthorized_token(self):
token = self.unauthorized_token()
        name = self.name + self.UNAUTHORIZED_TOKEN_SUFFIX
tokens = self.strategy.session_get(name, []) + [token]
self.strategy.session_set(name, tokens)
return token
def request_token_extra_arguments(self):
"""Return extra arguments needed on request-token process"""
return self.setting('REQUEST_TOKEN_EXTRA_ARGUMENTS', {})
def unauthorized_token(self):
"""Return request for unauthorized token (first stage)"""
params = self.request_token_extra_arguments()
params.update(self.get_scope_argument())
key, secret = self.get_key_and_secret()
# decoding='utf-8' produces errors with python-requests on Python3
# since the final URL will be of type bytes
decoding = None if six.PY3 else 'utf-8'
state = self.get_or_create_state()
response = self.request(
self.REQUEST_TOKEN_URL,
params=params,
auth=OAuth1(key, secret, callback_uri=self.get_redirect_uri(state),
decoding=decoding),
method=self.REQUEST_TOKEN_METHOD
)
content = response.content
if response.encoding or response.apparent_encoding:
content = content.decode(response.encoding or
response.apparent_encoding)
else:
content = response.content.decode()
return content
def oauth_authorization_request(self, token):
"""Generate OAuth request to authorize token."""
if not isinstance(token, dict):
token = parse_qs(token)
params = self.auth_extra_arguments() or {}
params.update(self.get_scope_argument())
params[self.OAUTH_TOKEN_PARAMETER_NAME] = token.get(
self.OAUTH_TOKEN_PARAMETER_NAME
)
state = self.get_or_create_state()
params[self.REDIRECT_URI_PARAMETER_NAME] = self.get_redirect_uri(state)
return '{0}?{1}'.format(self.authorization_url(), urlencode(params))
def oauth_auth(self, token=None, oauth_verifier=None,
signature_type=SIGNATURE_TYPE_AUTH_HEADER):
key, secret = self.get_key_and_secret()
oauth_verifier = oauth_verifier or self.data.get('oauth_verifier')
if token:
resource_owner_key = token.get('oauth_token')
resource_owner_secret = token.get('oauth_token_secret')
if not resource_owner_key:
raise AuthTokenError(self, 'Missing oauth_token')
if not resource_owner_secret:
raise AuthTokenError(self, 'Missing oauth_token_secret')
else:
resource_owner_key = None
resource_owner_secret = None
# decoding='utf-8' produces errors with python-requests on Python3
# since the final URL will be of type bytes
decoding = None if six.PY3 else 'utf-8'
state = self.get_or_create_state()
return OAuth1(key, secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
callback_uri=self.get_redirect_uri(state),
verifier=oauth_verifier,
signature_type=signature_type,
decoding=decoding)
def oauth_request(self, token, url, params=None, method='GET'):
"""Generate OAuth request, setups callback url"""
return self.request(url, method=method, params=params,
auth=self.oauth_auth(token))
def access_token(self, token):
"""Return request for access token value"""
return self.get_querystring(self.access_token_url(),
auth=self.oauth_auth(token),
method=self.ACCESS_TOKEN_METHOD)
class BaseOAuth2(OAuthAuth):
"""Base class for OAuth2 providers.
OAuth2 draft details at:
http://tools.ietf.org/html/draft-ietf-oauth-v2-10
"""
REFRESH_TOKEN_URL = None
REFRESH_TOKEN_METHOD = 'POST'
RESPONSE_TYPE = 'code'
REDIRECT_STATE = True
STATE_PARAMETER = True
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
params = {
'client_id': client_id,
'redirect_uri': self.get_redirect_uri(state)
}
if self.STATE_PARAMETER and state:
params['state'] = state
if self.RESPONSE_TYPE:
params['response_type'] = self.RESPONSE_TYPE
return params
def auth_url(self):
"""Return redirect url"""
state = self.get_or_create_state()
params = self.auth_params(state)
params.update(self.get_scope_argument())
params.update(self.auth_extra_arguments())
params = urlencode(params)
if not self.REDIRECT_STATE:
# redirect_uri matching is strictly enforced, so match the
            # provider's value exactly.
params = unquote(params)
return '{0}?{1}'.format(self.authorization_url(), params)
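    # Illustrative result (assumption): for AUTHORIZATION_URL set to
    # 'https://provider.example/authorize' the returned redirect looks like
    #     https://provider.example/authorize?client_id=...&redirect_uri=...
    #         &state=...&response_type=code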
def auth_complete_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
'grant_type': 'authorization_code', # request auth code
'code': self.data.get('code', ''), # server response code
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': self.get_redirect_uri(state)
}
def auth_headers(self):
return {'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'}
def request_access_token(self, *args, **kwargs):
return self.get_json(*args, **kwargs)
def process_error(self, data):
if data.get('error'):
if data['error'] == 'denied' or data['error'] == 'access_denied':
raise AuthCanceled(self, data.get('error_description', ''))
raise AuthFailed(self, data.get('error_description') or
data['error'])
elif 'denied' in data:
raise AuthCanceled(self, data['denied'])
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
state = self.validate_state()
self.process_error(self.data)
response = self.request_access_token(
self.access_token_url(),
data=self.auth_complete_params(state),
headers=self.auth_headers(),
method=self.ACCESS_TOKEN_METHOD
)
self.process_error(response)
return self.do_auth(response['access_token'], response=response,
*args, **kwargs)
@handle_http_errors
def do_auth(self, access_token, *args, **kwargs):
"""Finish the auth process once the access_token was retrieved"""
data = self.user_data(access_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def refresh_token_params(self, token, *args, **kwargs):
client_id, client_secret = self.get_key_and_secret()
return {
'refresh_token': token,
'grant_type': 'refresh_token',
'client_id': client_id,
'client_secret': client_secret
}
def process_refresh_token_response(self, response, *args, **kwargs):
return response.json()
def refresh_token(self, token, *args, **kwargs):
params = self.refresh_token_params(token, *args, **kwargs)
url = self.refresh_token_url()
method = self.REFRESH_TOKEN_METHOD
key = 'params' if method == 'GET' else 'data'
request_args = {'headers': self.auth_headers(),
'method': method,
key: params}
request = self.request(url, **request_args)
return self.process_refresh_token_response(request, *args, **kwargs)
def refresh_token_url(self):
return self.REFRESH_TOKEN_URL or self.access_token_url()
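# Illustrative sketch (assumption, not part of the original source): a minimal
# concrete OAuth2 backend built on BaseOAuth2. The endpoint URLs and response
# payload are hypothetical.
#
#     class ExampleOAuth2(BaseOAuth2):
#         name = 'example'
#         AUTHORIZATION_URL = 'https://provider.example/oauth/authorize'
#         ACCESS_TOKEN_URL = 'https://provider.example/oauth/token'
#         ACCESS_TOKEN_METHOD = 'POST'
#         DEFAULT_SCOPE = ['profile']
#
#         def get_user_details(self, response):
#             return {'username': response.get('login', '')}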
| agpl-3.0 |
vsaw/miniSSL | minissl/keyutils.py | 1 | 4926 | #!/bin/python
from Crypto.Util.asn1 import DerSequence
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Hash import HMAC
from Crypto.Random import _UserFriendlyRNG as Random
from Crypto.Cipher import PKCS1_OAEP
from M2Crypto import X509 as m2x509
from OpenSSL import crypto as ocrypto
from binascii import a2b_base64, hexlify
"""
This will read an RSA public key from a DER binary blob.
The idea for the DER parsing is from:
http://stackoverflow.com/questions/12911373/how-do-i-use-a-x509-certificate-with-pycrypto
Idea:
* Read DER using Crypto.Util.asn1.DerSequence, with decode()
* The first item in the sequence is the certificate...
* ... and the 6th item is the Subject Public Key Info, the only thing
that pycrypto will swallow without complaining
Arguments:
blob -- binary string representing a DER file (read from file)
Returns:
RSA public key for use with pyCrypto (Crypto.PublicKey.RSA)
"""
def read_pubkey_from_der(blob):
cert = DerSequence()
cert.decode(blob)
tbsCertificate = DerSequence()
tbsCertificate.decode(cert[0])
subjectPublicKeyInfo = tbsCertificate[6]
return RSA.importKey(subjectPublicKeyInfo)
"""
This will read an RSA public key from a PEM string.
Idea:
* Convert PEM to DER using binascii
* Call read_pubkey_from_der
Arguments:
pemstring -- String representing a certificate in PEM format
Returns:
RSA public key for use with pyCrypto (Crypto.PublicKey.RSA)
"""
def read_pubkey_from_pem(pemstring):
lines = pemstring.replace(" ",'').split()
der = a2b_base64(''.join(lines[1:-1]))
return read_pubkey_from_der(der)
"""
This will read an RSA private key from a DER binary blob.
Idea:
* Let pyCrypto parse the DER blob directly with RSA.importKey()
Arguments:
blob -- binary string representing a private key in DER format (read from file)
Returns:
RSA private key for use with pyCrypto (Crypto.PublicKey.RSA)
"""
def read_privkey_from_der(blob):
return RSA.importKey(blob)
"""
This will read an RSA private key from a PEM string.
Idea:
* Convert PEM to DER using binascii
* Call read_privkey_from_der
Arguments:
pemstring -- String representing a private key in PEM format
Returns:
RSA private key for use with pyCrypto (Crypto.PublicKey.RSA)
"""
def read_privkey_from_pem(pemstring):
lines = pemstring.replace(" ",'').split()
der = a2b_base64(''.join(lines[1:-1]))
return read_privkey_from_der(der)
"""
This will encrypt a message with an RSA public key.
Arguments:
msg -- message, String
pk -- public key, Crypto.PublicKey.RSA
Returns:
ciphertext -- binary string
"""
def encrypt_with_rsa_hybrid(msg, pk):
aes_key = generate_key(16)
iv = generate_random(16)
cipher_aes = AES.new(aes_key, AES.MODE_CFB, iv)
aes_enc_msg = cipher_aes.encrypt(msg)
cipher_rsa = PKCS1_OAEP.new(pk)
return (aes_enc_msg, iv, cipher_rsa.encrypt(aes_key))
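# Illustrative decryption sketch (assumption, not in the original module): the
# tuple returned above can be reversed with the matching RSA private key.
#
#     def decrypt_with_rsa_hybrid(ciphertext, sk):
#         aes_enc_msg, iv, enc_key = ciphertext
#         aes_key = PKCS1_OAEP.new(sk).decrypt(enc_key)
#         return AES.new(aes_key, AES.MODE_CFB, iv).decrypt(aes_enc_msg)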
"""
Generate cryptographically secure random bytes.
Arguments:
bytes -- number of bytes to generate
Returns:
random bytes -- binary string
"""
def generate_random(bytes):
return Random.get_random_bytes(bytes)
"""
Generate random key for AES.
Returns:
key -- byte array
"""
def generate_key(bytes=16):
return generate_random(bytes)
"""
Generate random nonce.
Returns:
nonce -- byte array
"""
def generate_nonce(bytes=28):
return generate_random(bytes)
"""
Read the subject of an X.509 certificate.
Arguments:
pem -- String representing a certificate in PEM format
Returns:
X509Name object containing the subject components
"""
def read_subject(pem):
return ocrypto.load_certificate(ocrypto.FILETYPE_PEM, pem).get_subject()
"""
Read the issuer of an X.509 certificate.
Arguments:
pem -- String representing a certificate in PEM format
Returns:
X509Name object containing the issuer components
"""
def read_issuer(pem):
return ocrypto.load_certificate(ocrypto.FILETYPE_PEM, pem).get_issuer()
"""
Read the notAfter date of an X.509 certificate.
Arguments:
pem -- String representing a certificate in PEM format
Returns:
String with the notAfter timestamp in ASN.1 time format
"""
def read_notafter(pem):
return ocrypto.load_certificate(ocrypto.FILETYPE_PEM, pem).get_notAfter()
"""
Verifies the signature of a certificate.
WARNING: Does not validate anything except the signature.
Arguments:
issuer_cert -- issuer certificate, in PEM, String.
cert -- certificate whose signature is to be verified. In PEM, String.
"""
def verify_certificate(issuer_cert, cert):
issuer_pubkey = m2x509.load_cert_string(issuer_cert, m2x509.FORMAT_PEM).get_pubkey()
return m2x509.load_cert_string(cert, m2x509.FORMAT_PEM).verify(issuer_pubkey)
"""
Create a HMAC from a key and data.
Arguments:
secret -- HMAC key, binary array
data -- data to be hashed, binary array
Returns:
HMAC value as hex string
"""
def create_hmac(secret, data):
h = HMAC.new(secret, 'sha')
h.update(data)
return h.hexdigest()
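# Illustrative usage sketch (assumption): verify a received MAC by recomputing
# it over the data; a constant-time comparison such as the standard library's
# hmac.compare_digest is preferable to == for the final check.
#
#     tag = create_hmac(key, data)
#     ok = hmac.compare_digest(create_hmac(key, data), tag)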
| mit |
fintech-circle/edx-platform | openedx/core/djangoapps/theming/tests/test_util.py | 45 | 3001 | """
Test helpers for Comprehensive Theming.
"""
from functools import wraps
import os
import os.path
import contextlib
import re
from mock import patch
from django.conf import settings
from django.contrib.sites.models import Site
import edxmako
from openedx.core.djangoapps.theming.models import SiteTheme
def with_comprehensive_theme(theme_dir_name):
"""
    A decorator to run a test with comprehensive theming enabled.
Arguments:
theme_dir_name (str): directory name of the site for which we want comprehensive theming enabled.
"""
# This decorator creates Site and SiteTheme models for given domain
def _decorator(func): # pylint: disable=missing-docstring
@wraps(func)
def _decorated(*args, **kwargs): # pylint: disable=missing-docstring
# make a domain name out of directory name
domain = "{theme_dir_name}.org".format(theme_dir_name=re.sub(r"\.org$", "", theme_dir_name))
site, __ = Site.objects.get_or_create(domain=domain, name=domain)
site_theme, __ = SiteTheme.objects.get_or_create(site=site, theme_dir_name=theme_dir_name)
with patch('openedx.core.djangoapps.theming.helpers.get_current_site_theme',
return_value=site_theme):
with patch('openedx.core.djangoapps.theming.helpers.get_current_site', return_value=site):
return func(*args, **kwargs)
return _decorated
return _decorator
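# Illustrative usage sketch (assumption, theme name hypothetical): applied to
# a test method so requests resolve against the given theme's site.
#
#     @with_comprehensive_theme('red-theme')
#     def test_footer_is_themed(self):
#         ...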
@contextlib.contextmanager
def with_comprehensive_theme_context(theme=None):
"""
    A function to run a test as if the request was made to the given theme.
Arguments:
        theme (str): name of the theme, or None if no theme is applied
"""
if theme:
domain = '{theme}.org'.format(theme=re.sub(r"\.org$", "", theme))
site, __ = Site.objects.get_or_create(domain=domain, name=theme)
site_theme, __ = SiteTheme.objects.get_or_create(site=site, theme_dir_name=theme)
with patch('openedx.core.djangoapps.theming.helpers.get_current_site_theme',
return_value=site_theme):
with patch('openedx.core.djangoapps.theming.helpers.get_current_site', return_value=site):
yield
else:
yield
def dump_theming_info():
"""Dump a bunch of theming information, for debugging."""
for namespace, lookup in edxmako.LOOKUP.items():
print "--- %s: %s" % (namespace, lookup.template_args['module_directory'])
for directory in lookup.directories:
print " %s" % (directory,)
print "=" * 80
for dirname, __, filenames in os.walk(settings.MAKO_MODULE_DIR):
print "%s ----------------" % (dir,)
for filename in sorted(filenames):
if filename.endswith(".pyc"):
continue
with open(os.path.join(dirname, filename)) as f:
content = len(f.read())
print " %s: %d" % (filename, content)
| agpl-3.0 |
ravibhure/ansible | lib/ansible/modules/network/netscaler/netscaler_lb_monitor.py | 114 | 47437 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netscaler_lb_monitor
short_description: Manage load balancing monitors
description:
- Manage load balancing monitors.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
monitorname:
description:
- >-
Name for the monitor. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters.
- "Minimum length = 1"
type:
choices:
- 'PING'
- 'TCP'
- 'HTTP'
- 'TCP-ECV'
- 'HTTP-ECV'
- 'UDP-ECV'
- 'DNS'
- 'FTP'
- 'LDNS-PING'
- 'LDNS-TCP'
- 'LDNS-DNS'
- 'RADIUS'
- 'USER'
- 'HTTP-INLINE'
- 'SIP-UDP'
- 'SIP-TCP'
- 'LOAD'
- 'FTP-EXTENDED'
- 'SMTP'
- 'SNMP'
- 'NNTP'
- 'MYSQL'
- 'MYSQL-ECV'
- 'MSSQL-ECV'
- 'ORACLE-ECV'
- 'LDAP'
- 'POP3'
- 'CITRIX-XML-SERVICE'
- 'CITRIX-WEB-INTERFACE'
- 'DNS-TCP'
- 'RTSP'
- 'ARP'
- 'CITRIX-AG'
- 'CITRIX-AAC-LOGINPAGE'
- 'CITRIX-AAC-LAS'
- 'CITRIX-XD-DDC'
- 'ND6'
- 'CITRIX-WI-EXTENDED'
- 'DIAMETER'
- 'RADIUS_ACCOUNTING'
- 'STOREFRONT'
- 'APPC'
- 'SMPP'
- 'CITRIX-XNC-ECV'
- 'CITRIX-XDM'
- 'CITRIX-STA-SERVICE'
- 'CITRIX-STA-SERVICE-NHOP'
description:
- "Type of monitor that you want to create."
action:
choices:
- 'NONE'
- 'LOG'
- 'DOWN'
description:
- >-
Action to perform when the response to an inline monitor (a monitor of type C(HTTP-INLINE)) indicates
that the service is down. A service monitored by an inline monitor is considered C(DOWN) if the response
code is not one of the codes that have been specified for the Response Code parameter.
- "Available settings function as follows:"
- >-
* C(NONE) - Do not take any action. However, the show service command and the show lb monitor command
indicate the total number of responses that were checked and the number of consecutive error
responses received after the last successful probe.
- "* C(LOG) - Log the event in NSLOG or SYSLOG."
- >-
* C(DOWN) - Mark the service as being down, and then do not direct any traffic to the service until the
configured down time has expired. Persistent connections to the service are terminated as soon as the
service is marked as C(DOWN). Also, log the event in NSLOG or SYSLOG.
respcode:
description:
- >-
Response codes for which to mark the service as UP. For any other response code, the action performed
depends on the monitor type. C(HTTP) monitors and C(RADIUS) monitors mark the service as C(DOWN), while
C(HTTP-INLINE) monitors perform the action indicated by the Action parameter.
httprequest:
description:
- "HTTP request to send to the server (for example, C(\\"HEAD /file.html\\"))."
rtsprequest:
description:
- "RTSP request to send to the server (for example, C(\\"OPTIONS *\\"))."
customheaders:
description:
- "Custom header string to include in the monitoring probes."
maxforwards:
description:
- >-
Maximum number of hops that the SIP request used for monitoring can traverse to reach the server.
Applicable only to monitors of type C(SIP-UDP).
- "Minimum value = C(0)"
- "Maximum value = C(255)"
sipmethod:
choices:
- 'OPTIONS'
- 'INVITE'
- 'REGISTER'
description:
- "SIP method to use for the query. Applicable only to monitors of type C(SIP-UDP)."
sipuri:
description:
- >-
SIP URI string to send to the service (for example, C(sip:sip.test)). Applicable only to monitors of
type C(SIP-UDP).
- "Minimum length = 1"
sipreguri:
description:
- >-
SIP user to be registered. Applicable only if the monitor is of type C(SIP-UDP) and the SIP Method
parameter is set to C(REGISTER).
- "Minimum length = 1"
send:
description:
- "String to send to the service. Applicable to C(TCP-ECV), C(HTTP-ECV), and C(UDP-ECV) monitors."
recv:
description:
- >-
String expected from the server for the service to be marked as UP. Applicable to C(TCP-ECV), C(HTTP-ECV),
and C(UDP-ECV) monitors.
query:
description:
- "Domain name to resolve as part of monitoring the DNS service (for example, C(example.com))."
querytype:
choices:
- 'Address'
- 'Zone'
- 'AAAA'
description:
- >-
Type of DNS record for which to send monitoring queries. Set to C(Address) for querying A records, C(AAAA)
for querying AAAA records, and C(Zone) for querying the SOA record.
scriptname:
description:
- >-
Path and name of the script to execute. The script must be available on the NetScaler appliance, in
the /nsconfig/monitors/ directory.
- "Minimum length = 1"
scriptargs:
description:
- "String of arguments for the script. The string is copied verbatim into the request."
dispatcherip:
description:
- "IP address of the dispatcher to which to send the probe."
dispatcherport:
description:
- "Port number on which the dispatcher listens for the monitoring probe."
username:
description:
- >-
User name with which to probe the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3), C(CITRIX-AG),
C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC) or C(CITRIX-XDM) server.
- "Minimum length = 1"
password:
description:
- >-
Password that is required for logging on to the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3),
C(CITRIX-AG), C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC-ECV) or C(CITRIX-XDM) server. Used in
conjunction with the user name specified for the C(username) parameter.
- "Minimum length = 1"
secondarypassword:
description:
- >-
Secondary password that users might have to provide to log on to the Access Gateway server.
Applicable to C(CITRIX-AG) monitors.
logonpointname:
description:
- >-
Name of the logon point that is configured for the Citrix Access Gateway Advanced Access Control
software. Required if you want to monitor the associated login page or Logon Agent. Applicable to
C(CITRIX-AAC-LAS) and C(CITRIX-AAC-LOGINPAGE) monitors.
lasversion:
description:
- >-
Version number of the Citrix Advanced Access Control Logon Agent. Required by the C(CITRIX-AAC-LAS)
monitor.
radkey:
description:
- >-
Authentication key (shared secret text string) for RADIUS clients and servers to exchange. Applicable
to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radnasid:
description:
- "NAS-Identifier to send in the Access-Request packet. Applicable to monitors of type C(RADIUS)."
- "Minimum length = 1"
radnasip:
description:
- >-
Network Access Server (NAS) IP address to use as the source IP address when monitoring a RADIUS
server. Applicable to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
radaccounttype:
description:
- "Account Type to be used in Account Request Packet. Applicable to monitors of type C(RADIUS_ACCOUNTING)."
- "Minimum value = 0"
- "Maximum value = 15"
radframedip:
description:
- "Source ip with which the packet will go out . Applicable to monitors of type C(RADIUS_ACCOUNTING)."
radapn:
description:
- >-
Called Station Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radmsisdn:
description:
- >-
Calling Stations Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radaccountsession:
description:
- >-
Account Session ID to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
lrtm:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Calculate the least response times for bound services. If this parameter is not enabled, the
appliance does not learn the response times of the bound services. Also used for LRTM load balancing.
deviation:
description:
- >-
Time value added to the learned average response time in dynamic response time monitoring (DRTM).
When a deviation is specified, the appliance learns the average response time of bound services and
adds the deviation to the average. The final value is then continually adjusted to accommodate
response time variations over time. Specified in milliseconds, seconds, or minutes.
- "Minimum value = C(0)"
- "Maximum value = C(20939)"
units1:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Deviation parameter. Cannot be changed after the monitor is created."
interval:
description:
- "Time interval between two successive probes. Must be greater than the value of Response Time-out."
- "Minimum value = C(1)"
- "Maximum value = C(20940)"
units3:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor interval units."
resptimeout:
description:
- >-
Amount of time for which the appliance must wait before it marks a probe as FAILED. Must be less than
the value specified for the Interval parameter.
- >-
Note: For C(UDP-ECV) monitors for which a receive string is not configured, response timeout does not
apply. For C(UDP-ECV) monitors with no receive string, probe failure is indicated by an ICMP port
unreachable error received from the service.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units4:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor response timeout units."
resptimeoutthresh:
description:
- >-
Response time threshold, specified as a percentage of the Response Time-out parameter. If the
response to a monitor probe has not arrived when the threshold is reached, the appliance generates an
SNMP trap called monRespTimeoutAboveThresh. After the response time returns to a value below the
threshold, the appliance generates a monRespTimeoutBelowThresh SNMP trap. For the traps to be
generated, the "MONITOR-RTO-THRESHOLD" alarm must also be enabled.
- "Minimum value = C(0)"
- "Maximum value = C(100)"
retries:
description:
- >-
Maximum number of probes to send to establish the state of a service for which a monitoring probe
failed.
- "Minimum value = C(1)"
- "Maximum value = C(127)"
failureretries:
description:
- >-
Number of retries that must fail, out of the number specified for the Retries parameter, for a
service to be marked as DOWN. For example, if the Retries parameter is set to 10 and the Failure
Retries parameter is set to 6, out of the ten probes sent, at least six probes must fail if the
service is to be marked as DOWN. The default value of 0 means that all the retries must fail if the
service is to be marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
alertretries:
description:
- >-
Number of consecutive probe failures after which the appliance generates an SNMP trap called
monProbeFailed.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
successretries:
description:
- "Number of consecutive successful probes required to transition a service's state from DOWN to UP."
- "Minimum value = C(1)"
- "Maximum value = C(32)"
downtime:
description:
- >-
Time duration for which to wait before probing a service that has been marked as DOWN. Expressed in
milliseconds, seconds, or minutes.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units2:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Down Time parameter. Cannot be changed after the monitor is created."
destip:
description:
- >-
IP address of the service to which to send probes. If the parameter is set to 0, the IP address of
the server to which the monitor is bound is considered the destination IP address.
destport:
description:
- >-
TCP or UDP port to which to send the probe. If the parameter is set to 0, the port number of the
service to which the monitor is bound is considered the destination port. For a monitor of type C(USER),
however, the destination port is the port number that is included in the HTTP request sent to the
dispatcher. Does not apply to monitors of type C(PING).
state:
choices:
- 'enabled'
- 'disabled'
description:
- >-
State of the monitor. The C(disabled) setting disables not only the monitor being configured, but all
monitors of the same type, until the parameter is set to C(enabled). If the monitor is bound to a
service, the state of the monitor is not taken into account when the state of the service is
determined.
reverse:
description:
- >-
Mark a service as DOWN, instead of UP, when probe criteria are satisfied, and as UP instead of DOWN
when probe criteria are not satisfied.
type: bool
transparent:
description:
- >-
The monitor is bound to a transparent device such as a firewall or router. The state of a transparent
device depends on the responsiveness of the services behind it. If a transparent device is being
monitored, a destination IP address must be specified. The probe is sent to the specified IP address
by using the MAC address of the transparent device.
type: bool
iptunnel:
description:
- >-
Send the monitoring probe to the service through an IP tunnel. A destination IP address must be
specified.
type: bool
tos:
description:
- "Probe the service by encoding the destination IP address in the IP TOS (6) bits."
type: bool
tosid:
description:
- "The TOS ID of the specified destination IP. Applicable only when the TOS parameter is set."
- "Minimum value = C(1)"
- "Maximum value = C(63)"
secure:
description:
- >-
Use a secure SSL connection when monitoring a service. Applicable only to TCP based monitors. The
secure option cannot be used with a C(CITRIX-AG) monitor, because a CITRIX-AG monitor uses a secure
connection by default.
type: bool
validatecred:
description:
- >-
Validate the credentials of the Xen Desktop DDC server user. Applicable to monitors of type
C(CITRIX-XD-DDC).
type: bool
domain:
description:
- >-
Domain in which the XenDesktop Desktop Delivery Controller (DDC) servers or Web Interface servers are
present. Required by C(CITRIX-XD-DDC) and C(CITRIX-WI-EXTENDED) monitors for logging on to the DDC servers
and Web Interface servers, respectively.
ipaddress:
description:
- >-
Set of IP addresses expected in the monitoring response from the DNS server, if the record type is A
or AAAA. Applicable to C(DNS) monitors.
- "Minimum length = 1"
group:
description:
- >-
Name of a newsgroup available on the NNTP service that is to be monitored. The appliance periodically
generates an NNTP query for the name of the newsgroup and evaluates the response. If the newsgroup is
found on the server, the service is marked as UP. If the newsgroup does not exist or if the search
fails, the service is marked as DOWN. Applicable to NNTP monitors.
- "Minimum length = 1"
filename:
description:
- >-
Name of a file on the FTP server. The appliance monitors the FTP service by periodically checking the
existence of the file on the server. Applicable to C(FTP-EXTENDED) monitors.
- "Minimum length = 1"
basedn:
description:
- >-
The base distinguished name of the LDAP service, from where the LDAP server can begin the search for
the attributes in the monitoring query. Required for C(LDAP) service monitoring.
- "Minimum length = 1"
binddn:
description:
- >-
The distinguished name with which an LDAP monitor can perform the Bind operation on the LDAP server.
Optional. Applicable to C(LDAP) monitors.
- "Minimum length = 1"
filter:
description:
- "Filter criteria for the LDAP query. Optional."
- "Minimum length = 1"
attribute:
description:
- >-
Attribute to evaluate when the LDAP server responds to the query. Success or failure of the
monitoring probe depends on whether the attribute exists in the response. Optional.
- "Minimum length = 1"
database:
description:
- "Name of the database to connect to during authentication."
- "Minimum length = 1"
oraclesid:
description:
- "Name of the service identifier that is used to connect to the Oracle database during authentication."
- "Minimum length = 1"
sqlquery:
description:
- >-
SQL query for a C(MYSQL-ECV) or C(MSSQL-ECV) monitor. Sent to the database server after the server
authenticates the connection.
- "Minimum length = 1"
evalrule:
description:
- >-
Default syntax expression that evaluates the database server's response to a MYSQL-ECV or MSSQL-ECV
monitoring query. Must produce a Boolean result. The result determines the state of the server. If
the expression returns TRUE, the probe succeeds.
- >-
For example, if you want the appliance to evaluate the error message to determine the state of the
server, use the rule C(MYSQL.RES.ROW(10) .TEXT_ELEM(2).EQ("MySQL")).
mssqlprotocolversion:
choices:
- '70'
- '2000'
- '2000SP1'
- '2005'
- '2008'
- '2008R2'
- '2012'
- '2014'
description:
- "Version of MSSQL server that is to be monitored."
Snmpoid:
description:
- "SNMP OID for C(SNMP) monitors."
- "Minimum length = 1"
snmpcommunity:
description:
- "Community name for C(SNMP) monitors."
- "Minimum length = 1"
snmpthreshold:
description:
- "Threshold for C(SNMP) monitors."
- "Minimum length = 1"
snmpversion:
choices:
- 'V1'
- 'V2'
description:
- "SNMP version to be used for C(SNMP) monitors."
metrictable:
description:
- "Metric table to which to bind metrics."
- "Minimum length = 1"
- "Maximum length = 99"
application:
description:
- >-
Name of the application used to determine the state of the service. Applicable to monitors of type
C(CITRIX-XML-SERVICE).
- "Minimum length = 1"
sitepath:
description:
- >-
URL of the logon page. For monitors of type C(CITRIX-WEB-INTERFACE), to monitor a dynamic page under the
site path, terminate the site path with a slash C(/). Applicable to C(CITRIX-WEB-INTERFACE),
C(CITRIX-WI-EXTENDED) and C(CITRIX-XDM) monitors.
- "Minimum length = 1"
storename:
description:
- >-
Store Name. For monitors of type C(STOREFRONT), C(storename) is an optional argument defining storefront
service store name. Applicable to C(STOREFRONT) monitors.
- "Minimum length = 1"
storefrontacctservice:
description:
- >-
Enable/Disable probing for Account Service. Applicable only to Store Front monitors. For
multi-tenancy configuration users my skip account service.
type: bool
hostname:
description:
- "Hostname in the FQDN format (Example: C(porche.cars.org)). Applicable to C(STOREFRONT) monitors."
- "Minimum length = 1"
netprofile:
description:
- "Name of the network profile."
- "Minimum length = 1"
- "Maximum length = 127"
originhost:
description:
- >-
Origin-Host value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
originrealm:
description:
- >-
Origin-Realm value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
hostipaddress:
description:
- >-
Host-IP-Address value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers. If Host-IP-Address is not specified, the appliance inserts the mapped IP (MIP)
address or subnet IP (SNIP) address from which the CER request (the monitoring probe) is sent.
- "Minimum length = 1"
vendorid:
description:
- >-
Vendor-Id value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
productname:
description:
- >-
Product-Name value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
firmwarerevision:
description:
- >-
Firmware-Revision value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers.
authapplicationid:
description:
- >-
List of Auth-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring CER message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
acctapplicationid:
description:
- >-
List of Acct-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
inbandsecurityid:
choices:
- 'NO_INBAND_SECURITY'
- 'TLS'
description:
- >-
Inband-Security-Id for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
supportedvendorids:
description:
- >-
List of Supported-Vendor-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
                message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring message.
- "Minimum value = C(1)"
- "Maximum value = C(4294967295)"
vendorspecificvendorid:
description:
- >-
Vendor-Id to use in the Vendor-Specific-Application-Id grouped attribute-value pair (AVP) in the
monitoring CER message. To specify Auth-Application-Id or Acct-Application-Id in
Vendor-Specific-Application-Id, use vendorSpecificAuthApplicationIds or
vendorSpecificAcctApplicationIds, respectively. Only one Vendor-Id is supported for all the
Vendor-Specific-Application-Id AVPs in a CER monitoring message.
- "Minimum value = 1"
vendorspecificauthapplicationids:
description:
- >-
List of Vendor-Specific-Auth-Application-Id attribute value pairs (AVPs) for the
Capabilities-Exchange-Request (CER) message to use for monitoring Diameter servers. A maximum of
eight of these AVPs are supported in a monitoring message. The specified value is combined with the
value of vendorSpecificVendorId to obtain the Vendor-Specific-Application-Id AVP in the CER
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
vendorspecificacctapplicationids:
description:
- >-
List of Vendor-Specific-Acct-Application-Id attribute value pairs (AVPs) to use for monitoring
Diameter servers. A maximum of eight of these AVPs are supported in a monitoring message. The
specified value is combined with the value of vendorSpecificVendorId to obtain the
Vendor-Specific-Application-Id AVP in the CER monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
kcdaccount:
description:
- "KCD Account used by C(MSSQL) monitor."
- "Minimum length = 1"
- "Maximum length = 32"
storedb:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Store the database list populated with the responses to monitor probes. Used in database specific
load balancing if C(MSSQL-ECV)/C(MYSQL-ECV) monitor is configured.
storefrontcheckbackendservices:
description:
- >-
                This option enables monitoring of services running on the StoreFront server. StoreFront
                services are monitored by probing a Windows service that runs on the StoreFront server and
                exposes details of which StoreFront services are running.
type: bool
trofscode:
description:
- "Code expected when the server is under maintenance."
trofsstring:
description:
- >-
                String expected from the server for the service to be marked as trofs. Applicable to
                C(HTTP-ECV)/C(TCP-ECV) monitors.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Set lb monitor
local_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
module: netscaler_lb_monitor
state: present
monitorname: monitor_1
type: HTTP-INLINE
action: DOWN
respcode: ['400']
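
# A second, illustrative example (editor's sketch, reusing the same assumed
# credentials): removing the monitor created above.
- name: Remove lb monitor
  local_action:
    nsip: 172.18.0.2
    nitro_user: nsroot
    nitro_pass: nsroot
    validate_certs: no
    module: netscaler_lb_monitor
    state: absent
    monitorname: monitor_1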
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection
)
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor import lbmonitor
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
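    # Defer the failure: main() reports a clean error via module.fail_json()
    # instead of raising at import time.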
PYTHON_SDK_IMPORTED = False
def lbmonitor_exists(client, module):
log('Checking if monitor exists')
if lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname']) > 0:
return True
else:
return False
def lbmonitor_identical(client, module, lbmonitor_proxy):
log('Checking if monitor is identical')
count = lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname'])
if count == 0:
return False
lbmonitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
diff_dict = lbmonitor_proxy.diff_object(lbmonitor_list[0])
    # Skip hashed fields since they cannot be compared directly
hashed_fields = [
'password',
'secondarypassword',
'radkey',
]
for key in hashed_fields:
if key in diff_dict:
del diff_dict[key]
if diff_dict == {}:
return True
else:
return False
def diff_list(client, module, lbmonitor_proxy):
monitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
return lbmonitor_proxy.diff_object(monitor_list[0])
def main():
module_specific_arguments = dict(
monitorname=dict(type='str'),
type=dict(
type='str',
choices=[
'PING',
'TCP',
'HTTP',
'TCP-ECV',
'HTTP-ECV',
'UDP-ECV',
'DNS',
'FTP',
'LDNS-PING',
'LDNS-TCP',
'LDNS-DNS',
'RADIUS',
'USER',
'HTTP-INLINE',
'SIP-UDP',
'SIP-TCP',
'LOAD',
'FTP-EXTENDED',
'SMTP',
'SNMP',
'NNTP',
'MYSQL',
'MYSQL-ECV',
'MSSQL-ECV',
'ORACLE-ECV',
'LDAP',
'POP3',
'CITRIX-XML-SERVICE',
'CITRIX-WEB-INTERFACE',
'DNS-TCP',
'RTSP',
'ARP',
'CITRIX-AG',
'CITRIX-AAC-LOGINPAGE',
'CITRIX-AAC-LAS',
'CITRIX-XD-DDC',
'ND6',
'CITRIX-WI-EXTENDED',
'DIAMETER',
'RADIUS_ACCOUNTING',
'STOREFRONT',
'APPC',
'SMPP',
'CITRIX-XNC-ECV',
'CITRIX-XDM',
'CITRIX-STA-SERVICE',
'CITRIX-STA-SERVICE-NHOP',
]
),
action=dict(
type='str',
choices=[
'NONE',
'LOG',
'DOWN',
]
),
respcode=dict(type='list'),
httprequest=dict(type='str'),
rtsprequest=dict(type='str'),
customheaders=dict(type='str'),
maxforwards=dict(type='float'),
sipmethod=dict(
type='str',
choices=[
'OPTIONS',
'INVITE',
'REGISTER',
]
),
sipuri=dict(type='str'),
sipreguri=dict(type='str'),
send=dict(type='str'),
recv=dict(type='str'),
query=dict(type='str'),
querytype=dict(
type='str',
choices=[
'Address',
'Zone',
'AAAA',
]
),
scriptname=dict(type='str'),
scriptargs=dict(type='str'),
dispatcherip=dict(type='str'),
dispatcherport=dict(type='int'),
username=dict(type='str'),
password=dict(type='str'),
secondarypassword=dict(type='str'),
logonpointname=dict(type='str'),
lasversion=dict(type='str'),
radkey=dict(type='str'),
radnasid=dict(type='str'),
radnasip=dict(type='str'),
radaccounttype=dict(type='float'),
radframedip=dict(type='str'),
radapn=dict(type='str'),
radmsisdn=dict(type='str'),
radaccountsession=dict(type='str'),
lrtm=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
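        # Each units* argument below supplies the time unit for the numeric
        # value it immediately follows (units1: deviation, units3: interval,
        # units4: resptimeout, units2: downtime).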
deviation=dict(type='float'),
units1=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
interval=dict(type='int'),
units3=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeout=dict(type='int'),
units4=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeoutthresh=dict(type='float'),
retries=dict(type='int'),
failureretries=dict(type='int'),
alertretries=dict(type='int'),
successretries=dict(type='int'),
downtime=dict(type='int'),
units2=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
destip=dict(type='str'),
destport=dict(type='int'),
reverse=dict(type='bool'),
transparent=dict(type='bool'),
iptunnel=dict(type='bool'),
tos=dict(type='bool'),
tosid=dict(type='float'),
secure=dict(type='bool'),
validatecred=dict(type='bool'),
domain=dict(type='str'),
ipaddress=dict(type='list'),
group=dict(type='str'),
filename=dict(type='str'),
basedn=dict(type='str'),
binddn=dict(type='str'),
filter=dict(type='str'),
attribute=dict(type='str'),
database=dict(type='str'),
oraclesid=dict(type='str'),
sqlquery=dict(type='str'),
evalrule=dict(type='str'),
mssqlprotocolversion=dict(
type='str',
choices=[
'70',
'2000',
'2000SP1',
'2005',
'2008',
'2008R2',
'2012',
'2014',
]
),
Snmpoid=dict(type='str'),
snmpcommunity=dict(type='str'),
snmpthreshold=dict(type='str'),
snmpversion=dict(
type='str',
choices=[
'V1',
'V2',
]
),
application=dict(type='str'),
sitepath=dict(type='str'),
storename=dict(type='str'),
storefrontacctservice=dict(type='bool'),
hostname=dict(type='str'),
netprofile=dict(type='str'),
originhost=dict(type='str'),
originrealm=dict(type='str'),
hostipaddress=dict(type='str'),
vendorid=dict(type='float'),
productname=dict(type='str'),
firmwarerevision=dict(type='float'),
authapplicationid=dict(type='list'),
acctapplicationid=dict(type='list'),
inbandsecurityid=dict(
type='str',
choices=[
'NO_INBAND_SECURITY',
'TLS',
]
),
supportedvendorids=dict(type='list'),
vendorspecificvendorid=dict(type='float'),
vendorspecificauthapplicationids=dict(type='list'),
vendorspecificacctapplicationids=dict(type='list'),
storedb=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
storefrontcheckbackendservices=dict(type='bool'),
trofscode=dict(type='float'),
trofsstring=dict(type='str'),
)
hand_inserted_arguments = dict()
argument_spec = dict()
argument_spec.update(module_specific_arguments)
argument_spec.update(netscaler_common_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk', **module_result)
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Instantiate lb monitor object
readwrite_attrs = [
'monitorname',
'type',
'action',
'respcode',
'httprequest',
'rtsprequest',
'customheaders',
'maxforwards',
'sipmethod',
'sipuri',
'sipreguri',
'send',
'recv',
'query',
'querytype',
'scriptname',
'scriptargs',
'dispatcherip',
'dispatcherport',
'username',
'password',
'secondarypassword',
'logonpointname',
'lasversion',
'radkey',
'radnasid',
'radnasip',
'radaccounttype',
'radframedip',
'radapn',
'radmsisdn',
'radaccountsession',
'lrtm',
'deviation',
'units1',
'interval',
'units3',
'resptimeout',
'units4',
'resptimeoutthresh',
'retries',
'failureretries',
'alertretries',
'successretries',
'downtime',
'units2',
'destip',
'destport',
'reverse',
'transparent',
'iptunnel',
'tos',
'tosid',
'secure',
'validatecred',
'domain',
'ipaddress',
'group',
'filename',
'basedn',
'binddn',
'filter',
'attribute',
'database',
'oraclesid',
'sqlquery',
'evalrule',
'mssqlprotocolversion',
'Snmpoid',
'snmpcommunity',
'snmpthreshold',
'snmpversion',
'application',
'sitepath',
'storename',
'storefrontacctservice',
'netprofile',
'originhost',
'originrealm',
'hostipaddress',
'vendorid',
'productname',
'firmwarerevision',
'authapplicationid',
'acctapplicationid',
'inbandsecurityid',
'supportedvendorids',
'vendorspecificvendorid',
'vendorspecificauthapplicationids',
'vendorspecificacctapplicationids',
'storedb',
'storefrontcheckbackendservices',
'trofscode',
'trofsstring',
]
readonly_attrs = [
'lrtmconf',
'lrtmconfstr',
'dynamicresponsetimeout',
'dynamicinterval',
'multimetrictable',
'dup_state',
'dup_weight',
'weight',
]
immutable_attrs = [
'monitorname',
'type',
'units1',
'units3',
'units4',
'units2',
'Snmpoid',
'hostname',
'servicename',
'servicegroupname',
]
transforms = {
'storefrontcheckbackendservices': ['bool_yes_no'],
'secure': ['bool_yes_no'],
'tos': ['bool_yes_no'],
'validatecred': ['bool_yes_no'],
'storefrontacctservice': ['bool_yes_no'],
'iptunnel': ['bool_yes_no'],
'transparent': ['bool_yes_no'],
'reverse': ['bool_yes_no'],
'lrtm': [lambda v: v.upper()],
'storedb': [lambda v: v.upper()],
}
lbmonitor_proxy = ConfigProxy(
actual=lbmonitor(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
ensure_feature_is_enabled(client, 'LB')
if module.params['state'] == 'present':
log('Applying actions for state present')
if not lbmonitor_exists(client, module):
if not module.check_mode:
log('Adding monitor')
lbmonitor_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not lbmonitor_identical(client, module, lbmonitor_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(lbmonitor_proxy, diff_list(client, module, lbmonitor_proxy).keys())
if immutables_changed != []:
diff = diff_list(client, module, lbmonitor_proxy)
msg = 'Cannot update immutable attributes %s' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff, **module_result)
if not module.check_mode:
log('Updating monitor')
lbmonitor_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
log('Doing nothing for monitor')
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state present')
if not module.check_mode:
if not lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor does not exist', **module_result)
if not lbmonitor_identical(client, module, lbmonitor_proxy):
module.fail_json(
msg='lb monitor is not configured correctly',
diff=diff_list(client, module, lbmonitor_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if lbmonitor_exists(client, module):
if not module.check_mode:
lbmonitor_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state absent')
if not module.check_mode:
if lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor still exists', **module_result)
module_result['actual_attributes'] = lbmonitor_proxy.get_actual_rw_attributes(filter='monitorname')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
tanvioka/rdbms-subsetter | test_subsetter.py | 3 | 5202 | import os
import unittest
import tempfile
import sqlite3
from subsetter import Db
class DummyArgs(object):
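    # Stand-in for the argparse namespace that subsetter.Db expects;
    # fraction=0.25 requests roughly a quarter of each table's rows.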
logarithmic = False
fraction = 0.25
force_rows = {}
children = 25
config = {}
exclude_tables = []
full_tables = []
buffer = 1000
dummy_args = DummyArgs()
class OverallTest(unittest.TestCase):
def setUp(self):
schema = ["CREATE TABLE state (abbrev, name)",
"CREATE TABLE zeppos (name, home_city)",
"""CREATE TABLE city (name, state_abbrev,
FOREIGN KEY (state_abbrev)
REFERENCES state(abbrev))""",
"""CREATE TABLE landmark (name, city,
FOREIGN KEY (city)
REFERENCES city(name))""",
"""CREATE TABLE zeppelins (name, home_city,
FOREIGN KEY (home_city)
REFERENCES city(name))""", # NULL FKs
"""CREATE TABLE languages_better_than_python (name)""", # empty table
]
self.source_db_filename = tempfile.mktemp()
self.source_db = sqlite3.connect(self.source_db_filename)
self.source_sqla = "sqlite:///%s" % self.source_db_filename
self.dest_db_filename = tempfile.mktemp()
self.dest_db = sqlite3.connect(self.dest_db_filename)
self.dest_sqla = "sqlite:///%s" % self.dest_db_filename
for statement in schema:
self.source_db.execute(statement)
self.dest_db.execute(statement)
for params in (('MN', 'Minnesota'), ('OH', 'Ohio'),
('MA', 'Massachussetts'), ('MI', 'Michigan')):
self.source_db.execute("INSERT INTO state VALUES (?, ?)", params)
for params in (('Duluth', 'MN'), ('Dayton', 'OH'),
('Boston', 'MA'), ('Houghton', 'MI')):
self.source_db.execute("INSERT INTO city VALUES (?, ?)", params)
for params in (('Lift Bridge', 'Duluth'), ("Mendelson's", 'Dayton'),
('Trinity Church', 'Boston'), ('Michigan Tech', 'Houghton')):
self.source_db.execute("INSERT INTO landmark VALUES (?, ?)", params)
for params in (('Graf Zeppelin', None), ('USS Los Angeles', None),
('Nordstern', None), ('Bodensee', None)):
self.source_db.execute("INSERT INTO zeppelins VALUES (?, ?)", params)
for params in (('Zeppo Marx', 'New York City'), ):
self.source_db.execute("INSERT INTO zeppos VALUES (?, ?)", params)
self.source_db.commit()
self.dest_db.commit()
def tearDown(self):
self.source_db.close()
os.unlink(self.source_db_filename)
self.dest_db.close()
os.unlink(self.dest_db_filename)
def test_parents_kept(self):
src = Db(self.source_sqla, dummy_args)
dest = Db(self.dest_sqla, dummy_args)
src.assign_target(dest)
src.create_subset_in(dest)
cities = self.dest_db.execute("SELECT * FROM city").fetchall()
self.assertEqual(len(cities), 1)
joined = self.dest_db.execute("""SELECT c.name, s.name
FROM city c JOIN state s
ON (c.state_abbrev = s.abbrev)""")
joined = joined.fetchall()
self.assertEqual(len(joined), 1)
def test_null_foreign_keys(self):
src = Db(self.source_sqla, dummy_args)
dest = Db(self.dest_sqla, dummy_args)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 1)
def test_exclude_tables(self):
args_with_exclude = DummyArgs()
args_with_exclude.exclude_tables = ['zeppelins',]
src = Db(self.source_sqla, args_with_exclude)
dest = Db(self.dest_sqla, args_with_exclude)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 0)
def test_full_tables(self):
args_with_full = DummyArgs()
args_with_full.full_tables = ['city',]
src = Db(self.source_sqla, args_with_full)
dest = Db(self.dest_sqla, args_with_full)
src.assign_target(dest)
src.create_subset_in(dest)
cities = self.dest_db.execute("SELECT * FROM city").fetchall()
self.assertEqual(len(cities), 4)
def test_exclude_tables_wildcard(self):
args_with_exclude = DummyArgs()
args_with_exclude.exclude_tables = ['zep*',]
src = Db(self.source_sqla, args_with_exclude)
dest = Db(self.dest_sqla, args_with_exclude)
src.assign_target(dest)
src.create_subset_in(dest)
zeppelins = self.dest_db.execute("SELECT * FROM zeppelins").fetchall()
self.assertEqual(len(zeppelins), 0)
zeppos = self.dest_db.execute("SELECT * FROM zeppos").fetchall()
self.assertEqual(len(zeppos), 0)
| cc0-1.0 |
detiber/ansible | lib/ansible/modules/cloud/rackspace/rax_clb.py | 70 | 9412 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_clb
short_description: create / delete a load balancer in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud load balancer.
version_added: "1.4"
options:
algorithm:
description:
- algorithm for the balancer being created
choices:
- RANDOM
- LEAST_CONNECTIONS
- ROUND_ROBIN
- WEIGHTED_LEAST_CONNECTIONS
- WEIGHTED_ROUND_ROBIN
default: LEAST_CONNECTIONS
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the load balancer
default: null
port:
description:
- Port for the balancer being created
default: 80
protocol:
description:
- Protocol for the balancer being created
choices:
- DNS_TCP
- DNS_UDP
- FTP
- HTTP
- HTTPS
- IMAPS
- IMAPv4
- LDAP
- LDAPS
- MYSQL
- POP3
- POP3S
- SMTP
- TCP
- TCP_CLIENT_FIRST
- UDP
- UDP_STREAM
- SFTP
default: HTTP
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
timeout:
description:
- timeout for communication between the balancer and the node
default: 30
type:
description:
- type of interface for the balancer being created
choices:
- PUBLIC
- SERVICENET
default: PUBLIC
vip_id:
description:
- Virtual IP ID to use when creating the load balancer for purposes of
sharing an IP with another load balancer of another protocol
version_added: 1.5
wait:
description:
- wait for the balancer to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Load Balancer
gather_facts: False
hosts: local
connection: local
tasks:
- name: Load Balancer create request
local_action:
module: rax_clb
credentials: ~/.raxpub
name: my-lb
port: 8080
protocol: HTTP
type: SERVICENET
timeout: 30
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_lb
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id):
if int(timeout) < 30:
module.fail_json(msg='"timeout" must be greater than or equal to 30')
changed = False
balancers = []
clb = pyrax.cloud_loadbalancers
if not clb:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
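    # Page through all load balancers: use the last id seen as a pagination
    # marker and stop once the API returns a short (final) page.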
balancer_list = clb.list()
while balancer_list:
retrieved = clb.list(marker=balancer_list.pop().id)
balancer_list.extend(retrieved)
if len(retrieved) < 2:
break
for balancer in balancer_list:
if name != balancer.name and name != balancer.id:
continue
balancers.append(balancer)
if len(balancers) > 1:
module.fail_json(msg='Multiple Load Balancers were matched by name, '
'try using the Load Balancer ID instead')
if state == 'present':
if isinstance(meta, dict):
metadata = [dict(key=k, value=v) for k, v in meta.items()]
if not balancers:
try:
virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
balancer = clb.create(name, metadata=metadata, port=port,
algorithm=algorithm, protocol=protocol,
timeout=timeout, virtual_ips=virtual_ips)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
balancer = balancers[0]
setattr(balancer, 'metadata',
[dict(key=k, value=v) for k, v in
balancer.get_metadata().items()])
atts = {
'name': name,
'algorithm': algorithm,
'port': port,
'protocol': protocol,
'timeout': timeout
}
for att, value in atts.items():
current = getattr(balancer, att)
if current != value:
changed = True
if changed:
balancer.update(**atts)
if balancer.metadata != metadata:
balancer.set_metadata(meta)
changed = True
virtual_ips = [clb.VirtualIP(type=vip_type)]
current_vip_types = set([v.type for v in balancer.virtual_ips])
vip_types = set([v.type for v in virtual_ips])
if current_vip_types != vip_types:
module.fail_json(msg='Load balancer Virtual IP type cannot '
'be changed')
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
balancer.get()
instance = rax_to_dict(balancer, 'clb')
result = dict(changed=changed, balancer=instance)
if balancer.status == 'ERROR':
result['msg'] = '%s failed to build' % balancer.id
elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
result['msg'] = 'Timeout waiting on %s' % balancer.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if balancers:
balancer = balancers[0]
try:
balancer.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
instance = rax_to_dict(balancer, 'clb')
if wait:
attempts = wait_timeout / 5
            pyrax.utils.wait_until(balancer, 'status', ('DELETED',),
interval=5, attempts=attempts)
else:
instance = {}
module.exit_json(changed=changed, balancer=instance)
def main():
argument_spec = rax_argument_spec()
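    # CLB_ALGORITHMS and CLB_PROTOCOLS are pulled in by the wildcard import of
    # ansible.module_utils.rax at the bottom of this file.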
argument_spec.update(
dict(
algorithm=dict(choices=CLB_ALGORITHMS,
default='LEAST_CONNECTIONS'),
meta=dict(type='dict', default={}),
name=dict(required=True),
port=dict(type='int', default=80),
protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
state=dict(default='present', choices=['present', 'absent']),
timeout=dict(type='int', default=30),
type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
vip_id=dict(),
wait=dict(type='bool'),
wait_timeout=dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
algorithm = module.params.get('algorithm')
meta = module.params.get('meta')
name = module.params.get('name')
port = module.params.get('port')
protocol = module.params.get('protocol')
state = module.params.get('state')
timeout = int(module.params.get('timeout'))
vip_id = module.params.get('vip_id')
vip_type = module.params.get('type')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
setup_rax_module(module, pyrax)
cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
justacec/bokeh | bokeh/models/tests/test_annotations.py | 2 | 8903 | from __future__ import absolute_import
from itertools import chain
from bokeh.models.annotations import (
Legend, Arrow, BoxAnnotation, Span, LabelSet, Label, Title
)
from bokeh.models import ColumnDataSource, ArrowHead
from bokeh.core.enums import (
NamedColor as Color, LineJoin, LineCap, FontStyle, TextAlign
)
FILL = ["fill_color", "fill_alpha"]
LINE = ["line_color", "line_width", "line_alpha", "line_join", "line_cap",
"line_dash", "line_dash_offset"]
TEXT = ["text_font", "text_font_size", "text_font_style", "text_color",
"text_alpha", "text_align", "text_baseline"]
ANGLE = ["angle", "angle_units"]
PROPS = ["name", "tags"]
def prefix(prefix, props):
return [prefix + p for p in props]
def check_props(annotation, *props):
expected = set(chain(PROPS, *props))
found = set(annotation.properties())
missing = expected.difference(found)
extra = found.difference(expected)
assert len(missing) == 0, "Properties missing: {0}".format(", ".join(sorted(missing)))
assert len(extra) == 0, "Extra properties: {0}".format(", ".join(sorted(extra)))
def check_fill(annotation, prefix="", fill_color='#ffffff', fill_alpha=1.0):
assert getattr(annotation, prefix + "fill_color") == fill_color
assert getattr(annotation, prefix + "fill_alpha") == fill_alpha
def check_line(annotation, prefix="", line_color=Color.black, line_width=1.0, line_alpha=1.0):
assert getattr(annotation, prefix + "line_color") == line_color
assert getattr(annotation, prefix + "line_width") == line_width
assert getattr(annotation, prefix + "line_alpha") == line_alpha
assert getattr(annotation, prefix + "line_join") == LineJoin.miter
assert getattr(annotation, prefix + "line_cap") == LineCap.butt
assert getattr(annotation, prefix + "line_dash") == []
assert getattr(annotation, prefix + "line_dash_offset") == 0
def check_text(annotation, prefix="", font_size='12pt', baseline='bottom', font_style='normal'):
assert getattr(annotation, prefix + "text_font") == "helvetica"
assert getattr(annotation, prefix + "text_font_size") == {"value": font_size}
assert getattr(annotation, prefix + "text_font_style") == font_style
assert getattr(annotation, prefix + "text_color") == "#444444"
assert getattr(annotation, prefix + "text_alpha") == 1.0
assert getattr(annotation, prefix + "text_align") == TextAlign.left
assert getattr(annotation, prefix + "text_baseline") == baseline
def test_Legend():
legend = Legend()
assert legend.plot is None
assert legend.location == 'top_right'
assert legend.label_standoff == 15
assert legend.label_height == 20
assert legend.label_width == 50
assert legend.glyph_height == 20
assert legend.glyph_width == 20
assert legend.legend_padding == 10
assert legend.legend_spacing == 3
assert legend.legends == []
yield check_line, legend, "border_"
yield check_text, legend, "label_", "10pt", "middle"
yield check_fill, legend, "background_"
yield (check_props, legend, [
"plot",
"location",
"orientation",
"label_standoff",
"label_height",
"label_width",
"glyph_height",
"glyph_width",
"legend_padding",
"legend_spacing",
"legends",
"level"],
prefix('label_', TEXT),
prefix('border_', LINE),
prefix('background_', FILL))
def test_Arrow():
arrow = Arrow()
assert arrow.plot is None
assert arrow.x_start is None
assert arrow.y_start is None
assert arrow.start_units == 'data'
assert arrow.start is None
assert arrow.x_end is None
assert arrow.y_end is None
assert arrow.end_units == 'data'
assert isinstance(arrow.end, ArrowHead)
assert isinstance(arrow.source, ColumnDataSource)
assert arrow.source.data == {}
assert arrow.x_range_name == "default"
assert arrow.y_range_name == "default"
yield check_line, arrow
yield (check_props, arrow, [
"plot",
"level",
"x_start",
"y_start",
"start_units",
"start",
"x_end",
"y_end",
"end_units",
"end",
"source",
"x_range_name",
"y_range_name"],
LINE)
def test_BoxAnnotation():
box = BoxAnnotation()
assert box.plot is None
assert box.left == None
assert box.left_units == 'data'
assert box.right == None
assert box.right_units == 'data'
assert box.bottom == None
assert box.bottom_units == 'data'
assert box.top == None
assert box.top_units == 'data'
assert box.x_range_name == 'default'
assert box.y_range_name == 'default'
assert box.level == 'annotation'
yield check_line, box, "", '#cccccc', 1, 0.3
yield check_fill, box, "", "#fff9ba", 0.4
yield (check_props, box, [
"render_mode",
"plot",
"left",
"left_units",
"right",
"right_units",
"bottom",
"bottom_units",
"top",
"top_units",
"x_range_name",
"y_range_name",
"level",
], LINE, FILL)
def test_Label():
label = Label()
assert label.plot is None
assert label.level == 'annotation'
assert label.x is None
assert label.y is None
assert label.x_units == 'data'
assert label.y_units == 'data'
assert label.text is None
assert label.angle == 0
assert label.angle_units == 'rad'
assert label.x_offset == 0
assert label.y_offset == 0
assert label.render_mode == 'canvas'
assert label.x_range_name == 'default'
assert label.y_range_name == 'default'
yield check_text, label
yield check_fill, label, "background_", None, 1.0
yield check_line, label, "border_", None, 1.0, 1.0
yield (check_props, label, [
"plot",
"level",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name"],
TEXT,
prefix('border_', LINE),
prefix('background_', FILL))
def test_LabelSet():
label_set = LabelSet()
assert label_set.plot is None
assert label_set.level == 'annotation'
assert label_set.x is None
assert label_set.y is None
assert label_set.x_units == 'data'
assert label_set.y_units == 'data'
assert label_set.text == 'text'
assert label_set.angle == 0
assert label_set.angle_units == 'rad'
assert label_set.x_offset == 0
assert label_set.y_offset == 0
assert label_set.render_mode == 'canvas'
assert label_set.x_range_name == 'default'
assert label_set.y_range_name == 'default'
assert isinstance(label_set.source, ColumnDataSource)
assert label_set.source.data == {}
yield check_text, label_set
yield check_fill, label_set, "background_", None, 1.0
yield check_line, label_set, "border_", None, 1.0, 1.0
yield (check_props, label_set, [
"plot",
"level",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name",
"source"],
TEXT,
ANGLE,
prefix('border_', LINE),
prefix('background_', FILL))
def test_Span():
line = Span()
assert line.plot is None
assert line.location is None
assert line.location_units == 'data'
assert line.dimension == 'width'
assert line.x_range_name == 'default'
assert line.y_range_name == 'default'
assert line.level == 'annotation'
assert line.render_mode == 'canvas'
yield check_line, line, "", 'black', 1.0
yield (check_props, line, [
"plot",
"location",
"location_units",
"dimension",
"x_range_name",
"y_range_name",
"level",
"render_mode"
], LINE)
def test_Title():
title = Title()
assert title.plot is None
assert title.level == 'annotation'
assert title.text is None
assert title.title_align == 'center'
assert title.title_padding == 0
assert title.text_font == 'helvetica'
assert title.text_font_size == {'value': '12pt'}
assert title.text_font_style == 'normal'
assert title.text_color == '#444444'
assert title.text_alpha == 1.0
yield check_fill, title, "background_", None, 1.0
yield check_line, title, "border_", None, 1.0, 1.0
yield (check_props, title, [
"plot",
"level",
"text",
"title_align",
"title_padding",
"text_font",
"text_font_size",
"text_font_style",
"text_color",
"text_alpha",
"render_mode"],
prefix('border_', LINE),
prefix('background_', FILL))
| bsd-3-clause |
jriguera/Diamond | src/diamond/handler/Handler.py | 21 | 4235 | # coding=utf-8
import logging
import threading
import traceback
from configobj import ConfigObj
import time
class Handler(object):
"""
Handlers process metrics that are collected by Collectors.
"""
def __init__(self, config=None, log=None):
"""
Create a new instance of the Handler class
"""
# Enabled? Default to yes, but allow handlers to disable themselves
self.enabled = True
# Initialize Log
if log is None:
self.log = logging.getLogger('diamond')
else:
self.log = log
# Initialize Blank Configs
self.config = ConfigObj()
# Load default
self.config.merge(self.get_default_config())
# Load in user
self.config.merge(config)
# error logging throttling
self.server_error_interval = float(self.config['server_error_interval'])
self._errors = {}
# Initialize Lock
self.lock = threading.Lock()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
return {
'get_default_config_help': 'get_default_config_help',
'server_error_interval': ('How frequently to send repeated server '
'errors'),
}
def get_default_config(self):
"""
Return the default config for the handler
"""
return {
'get_default_config': 'get_default_config',
'server_error_interval': 120,
}
def _process(self, metric):
"""
Decorator for processing handlers with a lock, catching exceptions
"""
if not self.enabled:
return
try:
try:
self.lock.acquire()
self.process(metric)
except Exception:
self.log.error(traceback.format_exc())
finally:
if self.lock.locked():
self.lock.release()
def process(self, metric):
"""
Process a metric
Should be overridden in subclasses
"""
raise NotImplementedError
def _flush(self):
"""
        Decorator for flushing handlers with a lock, catching exceptions
"""
if not self.enabled:
return
try:
try:
self.lock.acquire()
self.flush()
except Exception:
self.log.error(traceback.format_exc())
finally:
if self.lock.locked():
self.lock.release()
def flush(self):
"""
Flush metrics
Optional: Should be overridden in subclasses
"""
pass
def _throttle_error(self, msg, *args, **kwargs):
"""
Avoids sending errors repeatedly. Waits at least
`self.server_error_interval` seconds before sending the same error
string to the error logging facility. If not enough time has passed,
it calls `log.debug` instead
        Receives the same parameters as `Logger.error` and passes them on to the
selected logging function, but ignores all parameters but the main
message string when checking the last emission time.
:returns: the return value of `Logger.debug` or `Logger.error`
"""
now = time.time()
if msg in self._errors:
if ((now - self._errors[msg]) >=
self.server_error_interval):
fn = self.log.error
self._errors[msg] = now
else:
fn = self.log.debug
else:
self._errors[msg] = now
fn = self.log.error
return fn(msg, *args, **kwargs)
def _reset_errors(self, msg=None):
"""
Resets the logging throttle cache, so the next error is emitted
regardless of the value in `self.server_error_interval`
:param msg: if present, only this key is reset. Otherwise, the whole
cache is cleaned.
"""
if msg is not None and msg in self._errors:
del self._errors[msg]
else:
self._errors = {}
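# Illustrative sketch (editor's addition, not part of the upstream module):
# a minimal Handler subclass showing the process()/flush() contract. The
# metric argument is whatever diamond's collectors emit; the class name and
# behavior here are assumptions for demonstration only.
class LoggingHandler(Handler):
    def process(self, metric):
        # Called under self.lock by _process(); just log the metric.
        self.log.debug("metric received: %s", metric)
    def flush(self):
        # Called under self.lock by _flush(); nothing buffered to emit.
        pass
# Hypothetical usage: handler = LoggingHandler(config={}); handler._process(metric)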
| mit |
eyohansa/django | tests/migrations/test_multidb.py | 366 | 6909 | import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
"""
A router that doesn't have an opinion regarding migrating.
"""
def allow_migrate(self, db, app_label, **hints):
return None
class MigrateNothingRouter(object):
"""
A router that doesn't allow migrating.
"""
def allow_migrate(self, db, app_label, **hints):
return False
class MigrateEverythingRouter(object):
"""
A router that always allows migrating.
"""
def allow_migrate(self, db, app_label, **hints):
return True
class MigrateWhenFooRouter(object):
"""
A router that allows migrating depending on a hint.
"""
def allow_migrate(self, db, app_label, **hints):
return hints.get('foo', False)
class MultiDBOperationTests(OperationTestBase):
multi_db = True
def _test_create_model(self, app_label, should_run):
"""
Tests that CreateModel honours multi-db settings.
"""
operation = migrations.CreateModel(
"Pony",
[("id", models.AutoField(primary_key=True))],
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Test the database alteration
self.assertTableNotExists("%s_pony" % app_label)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if should_run:
self.assertTableExists("%s_pony" % app_label)
else:
self.assertTableNotExists("%s_pony" % app_label)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertTableNotExists("%s_pony" % app_label)
@override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
def test_create_model(self):
"""
Test when router doesn't have an opinion (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo", should_run=True)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_create_model2(self):
"""
Test when router returns False (i.e. CreateModel shouldn't run).
"""
self._test_create_model("test_mltdb_crmo2", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
def test_create_model3(self):
"""
Test when router returns True (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo3", should_run=True)
def test_create_model4(self):
"""
Test multiple routers.
"""
with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=False)
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
def _test_run_sql(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
sql = """
INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
""".format(app_label)
operation = migrations.RunSQL(sql, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_sql(self):
self._test_run_sql("test_mltdb_runsql", should_run=False)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_sql2(self):
self._test_run_sql("test_mltdb_runsql2", should_run=False)
self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})
def _test_run_python(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model(app_label, "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
operation = migrations.RunPython(inner_method, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_python(self):
self._test_run_python("test_mltdb_runpython", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_python2(self):
self._test_run_python("test_mltdb_runpython2", should_run=False)
self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
| bsd-3-clause |
zofuthan/edx-platform | cms/djangoapps/contentstore/course_info_model.py | 112 | 6790 | """
Views for viewing, adding, updating and deleting course updates.
Current db representation:
{
"_id" : locationjson,
"definition" : {
"data" : "<ol>[<li><h2>date</h2>content</li>]</ol>"},
"items" : [{"id": ID, "date": DATE, "content": CONTENT}]
"metadata" : ignored
}
}
"""
import re
import logging
from django.http import HttpResponseBadRequest
from django.utils.translation import ugettext as _
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.html_module import CourseInfoModule
from openedx.core.lib.xblock_utils import get_course_update_items
from cms.djangoapps.contentstore.push_notification import enqueue_push_course_update
# # This should be in a class which inherits from XmlDescriptor
log = logging.getLogger(__name__)
def get_course_updates(location, provided_id, user_id):
"""
Retrieve the relevant course_info updates and unpack into the model which the client expects:
[{id : index, date : string, content : html string}]
"""
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
course_updates = modulestore().create_item(user_id, location.course_key, location.block_type, location.block_id)
course_update_items = get_course_update_items(course_updates, _get_index(provided_id))
return _get_visible_update(course_update_items)
def update_course_updates(location, update, passed_id=None, user=None):
"""
Either add or update the given course update.
Add:
If the passed_id is absent or None, the course update is added.
If push_notification_selected is set in the update, a celery task for the push notification is created.
Update:
    If passed_id has a valid value, the corresponding course update is modified in place.
    Until updates have distinct ids, the passed_id is the location url + an index into the html structure.
"""
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
course_updates = modulestore().create_item(user.id, location.course_key, location.block_type, location.block_id)
course_update_items = list(reversed(get_course_update_items(course_updates)))
if passed_id is not None:
passed_index = _get_index(passed_id)
# oldest update at start of list
if 0 < passed_index <= len(course_update_items):
course_update_dict = course_update_items[passed_index - 1]
course_update_dict["date"] = update["date"]
course_update_dict["content"] = update["content"]
course_update_items[passed_index - 1] = course_update_dict
else:
return HttpResponseBadRequest(_("Invalid course update id."))
else:
course_update_dict = {
"id": len(course_update_items) + 1,
"date": update["date"],
"content": update["content"],
"status": CourseInfoModule.STATUS_VISIBLE
}
course_update_items.append(course_update_dict)
enqueue_push_course_update(update, location.course_key)
# update db record
save_course_update_items(location, course_updates, course_update_items, user)
# remove status key
if "status" in course_update_dict:
del course_update_dict["status"]
return course_update_dict
def _make_update_dict(update):
"""
Return course update item as a dictionary with required keys ('id', "date" and "content").
"""
return {
"id": update["id"],
"date": update["date"],
"content": update["content"],
}
def _get_visible_update(course_update_items):
"""
Filter course update items which have status "deleted".
"""
if isinstance(course_update_items, dict):
# single course update item
if course_update_items.get("status") != CourseInfoModule.STATUS_DELETED:
return _make_update_dict(course_update_items)
else:
# requested course update item has been deleted (soft delete)
return {"error": _("Course update not found."), "status": 404}
return ([_make_update_dict(update) for update in course_update_items
if update.get("status") != CourseInfoModule.STATUS_DELETED])
# pylint: disable=unused-argument
def delete_course_update(location, update, passed_id, user):
"""
Don't delete course update item from db.
Delete the given course_info update by settings "status" flag to 'deleted'.
Returns the resulting course_updates.
"""
if not passed_id:
return HttpResponseBadRequest()
try:
course_updates = modulestore().get_item(location)
except ItemNotFoundError:
return HttpResponseBadRequest()
course_update_items = list(reversed(get_course_update_items(course_updates)))
passed_index = _get_index(passed_id)
# delete update item from given index
if 0 < passed_index <= len(course_update_items):
course_update_item = course_update_items[passed_index - 1]
# soft delete course update item
course_update_item["status"] = CourseInfoModule.STATUS_DELETED
course_update_items[passed_index - 1] = course_update_item
# update db record
save_course_update_items(location, course_updates, course_update_items, user)
return _get_visible_update(course_update_items)
else:
return HttpResponseBadRequest(_("Invalid course update id."))
def _get_index(passed_id=None):
"""
From the url w/ index appended, get the index.
"""
if passed_id:
index_matcher = re.search(r'.*?/?(\d+)$', passed_id)
if index_matcher:
return int(index_matcher.group(1))
# return 0 if no index found
return 0
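# For example, _get_index('location/url/4') returns 4, while _get_index(None)
# returns 0; the id format shown is illustrative only.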
def _get_html(course_updates_items):
"""
Method to create course_updates_html from course_updates items
"""
list_items = []
for update in reversed(course_updates_items):
# filter course update items which have status "deleted".
if update.get("status") != CourseInfoModule.STATUS_DELETED:
list_items.append(u"<article><h2>{date}</h2>{content}</article>".format(**update))
return u"<section>{list_items}</section>".format(list_items="".join(list_items))
def save_course_update_items(location, course_updates, course_update_items, user=None):
"""
Save list of course_updates data dictionaries in new field ("course_updates.items")
and html related to course update in 'data' ("course_updates.data") field.
"""
course_updates.items = course_update_items
course_updates.data = _get_html(course_update_items)
# update db record
modulestore().update_item(course_updates, user.id)
return course_updates
| agpl-3.0 |
wanggang3333/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
bradleyayers/suds-htj | suds/umx/core.py | 199 | 7575 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides base classes for XML->object I{unmarshalling}.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge
log = getLogger(__name__)
reserved = { 'class':'cls', 'def':'dfn', }
class Core:
"""
The abstract XML I{node} unmarshaller. This class provides the
I{core} unmarshalling functionality.
"""
def process(self, content):
"""
Process an object graph representation of the xml I{node}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A suds object.
@rtype: L{Object}
"""
self.reset()
return self.append(content)
def append(self, content):
"""
Process the specified node and convert the XML document into
a I{suds} L{object}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A I{append-result} tuple as: (L{Object}, I{value})
@rtype: I{append-result}
@note: This is not the proper entry point.
@see: L{process()}
"""
self.start(content)
self.append_attributes(content)
self.append_children(content)
self.append_text(content)
self.end(content)
return self.postprocess(content)
def postprocess(self, content):
"""
Perform final processing of the resulting data structure as follows:
- Mixed values (children and text) will have a result of the I{content.node}.
        - Semi-simple values (attributes, no-children and text) will have a result of a
property object.
- Simple values (no-attributes, no-children with text nodes) will have a string
result equal to the value of the content.node.getText().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: The post-processed result.
@rtype: I{any}
"""
node = content.node
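        # Mixed content (child elements plus text): hand back the raw sax node.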
if len(node.children) and node.hasText():
return node
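        # Semi-simple content (real attributes and text, no children): wrap the text as a property.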
attributes = AttrList(node.attributes)
if attributes.rlen() and \
not len(node.children) and \
node.hasText():
p = Factory.property(node.name, node.getText())
return merge(content.data, p)
if len(content.data):
return content.data
lang = attributes.lang()
if content.node.isnil():
return None
if not len(node.children) and content.text is None:
if self.nillable(content):
return None
else:
return Text('', lang=lang)
if isinstance(content.text, basestring):
return Text(content.text, lang=lang)
else:
return content.text
def append_attributes(self, content):
"""
Append attribute nodes into L{Content.data}.
Attributes in the I{schema} or I{xml} namespaces are skipped.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
attributes = AttrList(content.node.attributes)
for attr in attributes.real():
name = attr.name
value = attr.value
self.append_attribute(name, value, content)
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
key = name
key = '_%s' % reserved.get(key, key)
setattr(content.data, key, value)
def append_children(self, content):
"""
Append child nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
for child in content.node:
cont = Content(child)
cval = self.append(cont)
key = reserved.get(child.name, child.name)
if key in content.data:
v = getattr(content.data, key)
if isinstance(v, list):
v.append(cval)
else:
setattr(content.data, key, [v, cval])
continue
if self.unbounded(cont):
if cval is None:
setattr(content.data, key, [])
else:
setattr(content.data, key, [cval,])
else:
setattr(content.data, key, cval)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
if content.node.hasText():
content.text = content.node.getText()
def reset(self):
pass
def start(self, content):
"""
Processing on I{node} has started. Build and return
the proper object.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A subclass of Object.
@rtype: L{Object}
"""
content.data = Factory.object(content.node.name)
def end(self, content):
"""
Processing on I{node} has ended.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
pass
def bounded(self, content):
"""
Get whether the content is bounded (not a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if bounded, else False
@rtype: boolean
        """
return ( not self.unbounded(content) )
def unbounded(self, content):
"""
Get whether the object is unbounded (a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if unbounded, else False
@rtype: boolean
        """
return False
def nillable(self, content):
"""
Get whether the object is nillable.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if nillable, else False
@rtype: boolean
        """
return False | lgpl-3.0 |
Slezhuk/ansible | lib/ansible/module_utils/vmware.py | 51 | 16732 | # -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
import atexit
import os
import ssl
import time
try:
# requests is required for exception handling of the ConnectionError
import requests
from pyVim import connect
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class TaskError(Exception):
pass
def wait_for_task(task):
while True:
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
try:
raise TaskError(task.info.error)
except AttributeError:
raise TaskError("An unknown error has occurred")
if task.info.state == vim.TaskInfo.State.running:
time.sleep(15)
if task.info.state == vim.TaskInfo.State.queued:
time.sleep(15)
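# Example (sketch, not part of the original module): waiting on a vSphere
# task returned by a pyVmomi call; `vm` is assumed to be a powered-off
# vim.VirtualMachine on an established connection.
def _example_power_on_vm(vm):
    task = vm.PowerOnVM_Task()
    changed, result = wait_for_task(task)
    return changed, result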
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_entity_child_by_path(content, entityRootFolder, path):
entity = entityRootFolder
searchIndex = content.searchIndex
paths = path.split("/")
try:
for path in paths:
            entity = searchIndex.FindChild(entity, path)
if entity.name == paths[-1]:
return entity
    except Exception:
pass
return None
# Maintain for legacy, or remove with 2.1 ?
# Should be replaced with find_cluster_by_name
def find_cluster_by_name_datacenter(datacenter, cluster_name):
host_folder = datacenter.hostFolder
for folder in host_folder.childEntity:
if folder.name == cluster_name:
return folder
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
clusters = get_all_objs(content, [vim.ClusterComputeResource], folder)
for cluster in clusters:
if cluster.name == cluster_name:
return cluster
return None
def find_datacenter_by_name(content, datacenter_name):
datacenters = get_all_objs(content, [vim.Datacenter])
for dc in datacenters:
if dc.name == datacenter_name:
return dc
return None
def find_datastore_by_name(content, datastore_name):
datastores = get_all_objs(content, [vim.Datastore])
for ds in datastores:
if ds.name == datastore_name:
return ds
return None
def find_dvs_by_name(content, switch_name):
vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
for dvs in vmware_distributed_switches:
if dvs.name == switch_name:
return dvs
return None
def find_hostsystem_by_name(content, hostname):
host_system = get_all_objs(content, [vim.HostSystem])
for host in host_system:
if host.name == hostname:
return host
return None
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'inventory_path':
vm = si.FindByInventoryPath(inventoryPath=vm_id)
        if not isinstance(vm, vim.VirtualMachine):
            vm = None
elif vm_id_type == 'uuid':
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
vms = get_all_objs(content, [vim.VirtualMachine], folder, recurse=recurse)
for vm in vms:
if vm.name == vm_name:
return vm
return None
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'guest_tools_status': vm.guest.toolsRunningStatus,
'guest_tools_version': vm.guest.toolsVersion,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
}
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
for device in vm.guest.net:
net_dict[device.macAddress] = list(device.ipAddress)
for k, v in iteritems(net_dict):
for ipaddress in v:
if ipaddress:
if '::' in ipaddress:
facts['ipv6'] = ipaddress
else:
facts['ipv4'] = ipaddress
ethernet_idx = 0
for idx, entry in enumerate(vm.config.hardware.device):
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
validate_certs = module.params['validate_certs']
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
                              'python or use validate_certs=false')
try:
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login))
except (requests.ConnectionError, ssl.SSLError) as connection_error:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
else:
module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error))
except Exception as e:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
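# Example (sketch, illustrative only): a typical module entry point that
# combines vmware_argument_spec() with connect_to_api().
def _example_module_main():
    from ansible.module_utils.basic import AnsibleModule
    module = AnsibleModule(argument_spec=vmware_argument_spec())
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    # returns the vSphere service content used by the lookup helpers below
    return connect_to_api(module)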
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
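# Example (sketch): list every datastore name in the inventory; get_all_objs
# returns a dict keyed by managed object reference with the name as value.
def _example_list_datastore_names(content):
    datastores = get_all_objs(content, [vim.Datastore])
    return sorted(datastores.values())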
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
    """ Use VMware's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
fti = content.guestOperationsManager.fileManager. \
InitiateFileTransferFromGuest(vm, creds, src)
result['size'] = fti.size
result['url'] = fti.url
# Use module_utils to fetch the remote url returned from the api
    rsp, info = fetch_url(module, fti.url, use_proxy=False,
force=True, last_mod_time=None,
timeout=10, headers=None)
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
# exit early if xfer failed
if info['status'] != 200:
result['failed'] = True
return result
# attempt to read the content and write it
try:
with open(dest, 'wb') as f:
f.write(rsp.read())
except Exception as e:
result['failed'] = True
result['msg'] = str(e)
return result
def push_file_to_guest(module, content, vm, username, password, src, dest, overwrite=True):
    """ Use VMware's filemanager api to push a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# the api requires a filesize in bytes
fdata = None
try:
# filesize = os.path.getsize(src)
filesize = os.stat(src).st_size
with open(src, 'rb') as f:
fdata = f.read()
result['local_filesize'] = filesize
except Exception as e:
result['failed'] = True
result['msg'] = "Unable to read src file: %s" % str(e)
return result
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
file_attribute = vim.vm.guest.FileManager.FileAttributes()
url = content.guestOperationsManager.fileManager. \
InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
filesize, overwrite)
# PUT the filedata to the url ...
    rsp, info = fetch_url(module, url, method="put", data=fdata,
use_proxy=False, force=True, last_mod_time=None,
timeout=10, headers=None)
result['msg'] = str(rsp.read())
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
return result
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
| gpl-3.0 |
Lyrositor/moul-scripts | Python/system/py_compile.py | 111 | 5930 | """Routine to "compile" a .py file to a .pyc (or .pyo) file.
This module has intimate knowledge of the format of .pyc files.
"""
import __builtin__
import imp
import marshal
import os
import sys
import traceback
MAGIC = imp.get_magic()
__all__ = ["compile", "main", "PyCompileError"]
class PyCompileError(Exception):
"""Exception raised when an error occurs while attempting to
compile the file.
To raise this exception, use
raise PyCompileError(exc_type,exc_value,file[,msg])
where
exc_type: exception type to be used in error message
                    type name can be accessed as class variable
'exc_type_name'
exc_value: exception value to be used in error message
                    can be accessed as class variable 'exc_value'
file: name of file being compiled to be used in error message
                    can be accessed as class variable 'file'
msg: string message to be written as error message
If no value is given, a default exception message will be given,
consistent with 'standard' py_compile output.
    message (or default) can be accessed as class variable 'msg'
"""
def __init__(self, exc_type, exc_value, file, msg=''):
exc_type_name = exc_type.__name__
if exc_type is SyntaxError:
tbtext = ''.join(traceback.format_exception_only(exc_type, exc_value))
errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
else:
errmsg = "Sorry: %s: %s" % (exc_type_name,exc_value)
Exception.__init__(self,msg or errmsg,exc_type_name,exc_value,file)
self.exc_type_name = exc_type_name
self.exc_value = exc_value
self.file = file
self.msg = msg or errmsg
def __str__(self):
return self.msg
def wr_long(f, x):
"""Internal; write a 32-bit int to a file in little-endian order."""
f.write(chr( x & 0xff))
f.write(chr((x >> 8) & 0xff))
f.write(chr((x >> 16) & 0xff))
f.write(chr((x >> 24) & 0xff))
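# Example: wr_long(f, 0x12345678) writes the bytes '\x78\x56\x34\x12',
# i.e. the least-significant byte first.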
def compile(file, cfile=None, dfile=None, doraise=False):
"""Byte-compile one Python source file to Python bytecode.
Arguments:
file: source filename
cfile: target filename; defaults to source with 'c' or 'o' appended
('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
dfile: purported filename; defaults to source (this is the filename
that will show up in error messages)
doraise: flag indicating whether or not an exception should be
raised when a compile error is found. If an exception
occurs and this flag is set to False, a string
indicating the nature of the exception will be printed,
and the function will return to the caller. If an
exception occurs and this flag is set to True, a
PyCompileError exception will be raised.
Note that it isn't necessary to byte-compile Python modules for
execution efficiency -- Python itself byte-compiles a module when
it is loaded, and if it can, writes out the bytecode to the
corresponding .pyc (or .pyo) file.
However, if a Python installation is shared between users, it is a
good idea to byte-compile all modules upon installation, since
other users may not be able to write in the source directories,
and thus they won't be able to write the .pyc/.pyo file, and then
they would be byte-compiling every module each time it is loaded.
This can slow down program start-up considerably.
See compileall.py for a script/module that uses this module to
byte-compile all installed files (or all files in selected
directories).
"""
with open(file, 'U') as f:
try:
timestamp = long(os.fstat(f.fileno()).st_mtime)
except AttributeError:
timestamp = long(os.stat(file).st_mtime)
codestring = f.read()
try:
codeobject = __builtin__.compile(codestring, dfile or file,'exec')
    except Exception, err:
py_exc = PyCompileError(err.__class__,err.args,dfile or file)
if doraise:
raise py_exc
else:
sys.stderr.write(py_exc.msg + '\n')
return
if cfile is None:
cfile = file + (__debug__ and 'c' or 'o')
with open(cfile, 'wb') as fc:
fc.write('\0\0\0\0')
wr_long(fc, timestamp)
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(MAGIC)
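# Example (sketch): byte-compile one file, raising PyCompileError on a
# compile error instead of printing it to stderr.
def _example_compile_one(path):
    compile(path, doraise=True)  # writes path + 'c' (or 'o' under -O)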
def main(args=None):
"""Compile several source files.
The files named in 'args' (or on the command line, if 'args' is
not specified) are compiled and the resulting bytecode is cached
in the normal manner. This function does not search a directory
structure to locate source files; it only compiles files named
explicitly. If '-' is the only parameter in args, the list of
files is taken from standard input.
"""
if args is None:
args = sys.argv[1:]
rv = 0
if args == ['-']:
while True:
filename = sys.stdin.readline()
if not filename:
break
filename = filename.rstrip('\n')
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
sys.stderr.write("%s\n" % error.msg)
except IOError as error:
rv = 1
sys.stderr.write("%s\n" % error)
else:
for filename in args:
try:
compile(filename, doraise=True)
except PyCompileError as error:
# return value to indicate at least one failure
rv = 1
sys.stderr.write(error.msg)
return rv
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
vitaly4uk/django | tests/db_typecasts/tests.py | 206 | 2412 | # Unit tests for typecast functions in django.db.backends.utils
import datetime
import unittest
from django.db.backends import utils as typecasts
from django.utils import six
TEST_CASES = {
'typecast_date': (
('', None),
(None, None),
('2005-08-11', datetime.date(2005, 8, 11)),
('1990-01-01', datetime.date(1990, 1, 1)),
),
'typecast_time': (
('', None),
(None, None),
('0:00:00', datetime.time(0, 0)),
('0:30:00', datetime.time(0, 30)),
('8:50:00', datetime.time(8, 50)),
('08:50:00', datetime.time(8, 50)),
('12:00:00', datetime.time(12, 00)),
('12:30:00', datetime.time(12, 30)),
('13:00:00', datetime.time(13, 00)),
('23:59:00', datetime.time(23, 59)),
('00:00:12', datetime.time(0, 0, 12)),
('00:00:12.5', datetime.time(0, 0, 12, 500000)),
('7:22:13.312', datetime.time(7, 22, 13, 312000)),
),
'typecast_timestamp': (
('', None),
(None, None),
('2005-08-11 0:00:00', datetime.datetime(2005, 8, 11)),
('2005-08-11 0:30:00', datetime.datetime(2005, 8, 11, 0, 30)),
('2005-08-11 8:50:30', datetime.datetime(2005, 8, 11, 8, 50, 30)),
('2005-08-11 8:50:30.123', datetime.datetime(2005, 8, 11, 8, 50, 30, 123000)),
('2005-08-11 8:50:30.9', datetime.datetime(2005, 8, 11, 8, 50, 30, 900000)),
('2005-08-11 8:50:30.312-05', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
('2005-08-11 8:50:30.312+02', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
# ticket 14453
('2010-10-12 15:29:22.063202', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.063202-03', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.063202+04', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.0632021', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.0632029', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
),
}
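# Example drawn from the cases above:
#   typecasts.typecast_timestamp('2005-08-11 8:50:30.123')
#   == datetime.datetime(2005, 8, 11, 8, 50, 30, 123000)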
class DBTypeCasts(unittest.TestCase):
def test_typeCasts(self):
for k, v in six.iteritems(TEST_CASES):
for inpt, expected in v:
got = getattr(typecasts, k)(inpt)
self.assertEqual(got, expected, "In %s: %r doesn't match %r. Got %r instead." % (k, inpt, expected, got))
| bsd-3-clause |
bitifirefly/edx-platform | common/djangoapps/student/migrations/0051_auto__add_courseenrollmentattribute.py | 82 | 17758 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseEnrollmentAttribute'
db.create_table('student_courseenrollmentattribute', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('enrollment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['student.CourseEnrollment'])),
('namespace', self.gf('django.db.models.fields.CharField')(max_length=255)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('student', ['CourseEnrollmentAttribute'])
def backwards(self, orm):
# Deleting model 'CourseEnrollmentAttribute'
db.delete_table('student_courseenrollmentattribute')
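    # Example (hypothetical invocation): with South installed, this migration
    # is applied with `python manage.py migrate student 0051` and reversed by
    # migrating back to 0050.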
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollmentattribute': {
'Meta': {'object_name': 'CourseEnrollmentAttribute'},
'enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'student.dashboardconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.entranceexamconfiguration': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'EntranceExamConfiguration'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'skip_entrance_exam': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.languageproficiency': {
'Meta': {'unique_together': "(('code', 'user_profile'),)", 'object_name': 'LanguageProficiency'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_profile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'language_proficiencies'", 'to': "orm['student.UserProfile']"})
},
'student.linkedinaddtoprofileconfiguration': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'LinkedInAddToProfileConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'company_identifier': ('django.db.models.fields.TextField', [], {}),
'dashboard_tracking_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trk_partner_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.manualenrollmentaudit': {
'Meta': {'object_name': 'ManualEnrollmentAudit'},
'enrolled_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'enrolled_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'state_transition': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bio': ('django.db.models.fields.CharField', [], {'max_length': '3000', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'profile_image_uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student'] | agpl-3.0 |
mikalstill/nova | nova/api/openstack/compute/tenant_networks.py | 3 | 7723 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo_log import log as logging
import six
from webob import exc
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack.compute.schemas import tenant_networks as schema
from nova.api.openstack import wsgi
from nova.api import validation
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
import nova.network
from nova import objects
from nova.policies import tenant_networks as tn_policies
from nova import quota
CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
def network_dict(network):
# NOTE(danms): Here, network should be an object, which could have come
# from neutron and thus be missing most of the attributes. Providing a
# default to get() avoids trying to lazy-load missing attributes.
return {"id": network.get("uuid", None) or network.get("id", None),
"cidr": str(network.get("cidr", None)),
"label": network.get("label", None)}
class TenantNetworkController(wsgi.Controller):
def __init__(self, network_api=None):
        self.network_api = network_api or nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.api.use_neutron_default_nets:
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception("Failed to get default networks")
def _get_default_networks(self):
project_id = CONF.api.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.items()]
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
context.can(tn_policies.BASE_POLICY_NAME)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
context.can(tn_policies.BASE_POLICY_NAME)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.expected_errors((403, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = req.environ['nova.context']
context.can(tn_policies.BASE_POLICY_NAME)
try:
self.network_api.disassociate(context, id)
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.expected_errors((400, 403, 409, 503))
@validation.schema(schema.create)
def create(self, req, body):
context = req.environ["nova.context"]
context.can(tn_policies.BASE_POLICY_NAME)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = {k: network.get(k) for k in keys}
label = network["label"]
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
try:
if CONF.enable_network_quota:
objects.Quotas.check_deltas(context, {'networks': 1},
context.project_id)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPForbidden(explanation=msg)
kwargs['project_id'] = context.project_id
try:
networks = self.network_api.create(context,
label=label, **kwargs)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.CidrConflict as e:
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
# NOTE(melwitt): We recheck the quota after creating the object to
# prevent users from allocating more resources than their allowed quota
# in the event of a race. This is configurable because it can be
# expensive if strict quota limits are not required in a deployment.
if CONF.quota.recheck_quota and CONF.enable_network_quota:
try:
objects.Quotas.check_deltas(context, {'networks': 0},
context.project_id)
except exception.OverQuota:
self.network_api.delete(context,
network_dict(networks[0])['id'])
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPForbidden(explanation=msg)
return {"network": network_dict(networks[0])}
def _network_count(context, project_id):
# NOTE(melwitt): This assumes a single cell.
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return {'project': {'networks': len(networks)}}
def _register_network_quota():
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.CountableResource('networks',
_network_count,
'quota_networks'))
_register_network_quota()
| apache-2.0 |
cgoldberg/linux-metrics | example.py | 1 | 1658 | #!/usr/bin/env python
#
# Copyright (c) 2011-2013 Corey Goldberg (http://goldb.org)
#
# This file is part of linux-metrics
#
# License :: OSI Approved :: MIT License:
# http://www.opensource.org/licenses/mit-license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
""" example usage of linux-metrics """
import linux_metrics as lm
def main():
# cpu
print 'procs running: %d' % lm.cpu_stat.procs_running()
cpu_pcts = lm.cpu_stat.cpu_percents(sample_duration=1)
print 'cpu utilization: %.2f%%' % (100 - cpu_pcts['idle'])
# disk
print 'disk busy: %s%%' % lm.disk_stat.disk_busy('sda', sample_duration=1)
r, w = lm.disk_stat.disk_reads_writes('sda1')
print 'disk reads: %s' % r
print 'disk writes: %s' % w
# memory
used, total, _, _, _, _ = lm.mem_stat.mem_stats()
print 'mem used: %s' % used
print 'mem total: %s' % total
# network
rx_bits, tx_bits = lm.net_stat.rx_tx_bits('enp4s0')
print 'net bits received: %s' % rx_bits
print 'net bits sent: %s' % tx_bits
if __name__ == '__main__':
main()
| mit |
newerthcom/savagerebirth | libs/python-2.72/Lib/popen2.py | 304 | 8416 | """Spawn a command with pipes to its stdin, stdout, and optionally stderr.
The normal os.popen(cmd, mode) call spawns a shell command and provides a
file interface to just the input or output of the process depending on
whether mode is 'r' or 'w'. This module provides the functions popen2(cmd)
and popen3(cmd) which return two or three pipes to the spawned command.
"""
import os
import sys
import warnings
warnings.warn("The popen2 module is deprecated. Use the subprocess module.",
DeprecationWarning, stacklevel=2)
__all__ = ["popen2", "popen3", "popen4"]
try:
MAXFD = os.sysconf('SC_OPEN_MAX')
except (AttributeError, ValueError):
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
if inst.poll(_deadstate=sys.maxint) >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
class Popen3:
"""Class representing a child process. Normally, instances are created
internally by the functions popen2() and popen3()."""
sts = -1 # Child not completed yet
def __init__(self, cmd, capturestderr=False, bufsize=-1):
"""The parameter 'cmd' is the shell command to execute in a
sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments
will be passed directly to the program without shell intervention (as
with os.spawnv()). If 'cmd' is a string it will be passed to the shell
(as with os.system()). The 'capturestderr' flag, if true, specifies
that the object should capture standard error output of the child
process. The default is false. If the 'bufsize' parameter is
specified, it specifies the size of the I/O buffers to/from the child
process."""
_cleanup()
self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
errout, errin = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
if capturestderr:
os.dup2(errin, 2)
self._run_child(cmd)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if capturestderr:
os.close(errin)
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
def __del__(self):
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxint)
if self.sts < 0:
if _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _run_child(self, cmd):
if isinstance(cmd, basestring):
cmd = ['/bin/sh', '-c', cmd]
os.closerange(3, MAXFD)
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
def poll(self, _deadstate=None):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
# pid will be 0 if self.pid hasn't terminated
if pid == self.pid:
self.sts = sts
except os.error:
if _deadstate is not None:
self.sts = _deadstate
return self.sts
def wait(self):
"""Wait for and return the exit status of the child process."""
if self.sts < 0:
pid, sts = os.waitpid(self.pid, 0)
# This used to be a test, but it is believed to be
# always true, so I changed it to an assertion - mvl
assert pid == self.pid
self.sts = sts
return self.sts
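# Example (sketch): capture a command's output through Popen3. On Windows
# and OS/2 this class is deleted below, so the helper is illustrative only.
def _example_read_output(cmd):
    p = Popen3(cmd)
    output = p.fromchild.read()
    status = p.wait()
    return output, status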
class Popen4(Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
_cleanup()
self.cmd = cmd
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
self._run_child(cmd)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if sys.platform[:3] == "win" or sys.platform == "os2emx":
# Some things don't make sense on non-Unix platforms.
del Popen3, Popen4
def popen2(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout, child_stdin) are returned."""
w, r = os.popen2(cmd, mode, bufsize)
return r, w
def popen3(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout, child_stdin, child_stderr) are returned."""
w, r, e = os.popen3(cmd, mode, bufsize)
return r, w, e
def popen4(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout_stderr, child_stdin) are returned."""
w, r = os.popen4(cmd, mode, bufsize)
return r, w
else:
def popen2(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout, child_stdin) are returned."""
inst = Popen3(cmd, False, bufsize)
return inst.fromchild, inst.tochild
def popen3(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout, child_stdin, child_stderr) are returned."""
inst = Popen3(cmd, True, bufsize)
return inst.fromchild, inst.tochild, inst.childerr
def popen4(cmd, bufsize=-1, mode='t'):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
be a sequence, in which case arguments will be passed directly to the
program without shell intervention (as with os.spawnv()). If 'cmd' is a
string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdout_stderr, child_stdin) are returned."""
inst = Popen4(cmd, bufsize)
return inst.fromchild, inst.tochild
__all__.extend(["Popen3", "Popen4"])
| gpl-2.0 |
ttfseiko/openerp-trunk | openerp/addons/l10n_cl/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mrry/tensorflow | tensorflow/contrib/learn/python/learn/tests/estimators_test.py | 7 | 5456 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
# TODO(b/29580537): Remove when we deprecate feature column inference.
class InferredFeatureColumnTest(tf.test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
def custom_optimizer():
return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
cont_features = [
tf.contrib.layers.real_valued_column("", dimension=4)]
classifier = learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
score = accuracy_score(y_test, classifier.predict(x_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
class FeatureEngineeringFunctionTest(tf.test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {
"transformed_x": tf.constant([9.])
}, {
"transformed_y": tf.constant([99.])
}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["transformed_x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {"x": tf.constant([9.])}, {"y": tf.constant([99.])}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator_with_fe_fn = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = tf.contrib.learn.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(tf.test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
def custom_optimizer():
return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
score = accuracy_score(y_test, classifier.predict(x_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
tpo/ansible | test/units/cli/test_console.py | 77 | 1829 | # (c) 2016, Thilo Uttendorfer <tlo@sengaya.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch
from ansible.cli.console import ConsoleCLI
class TestConsoleCLI(unittest.TestCase):
def test_parse(self):
cli = ConsoleCLI(['ansible test'])
cli.parse()
self.assertTrue(cli.parser is not None)
def test_module_args(self):
cli = ConsoleCLI(['ansible test'])
cli.parse()
res = cli.module_args('copy')
self.assertTrue(cli.parser is not None)
self.assertIn('src', res)
self.assertIn('backup', res)
self.assertIsInstance(res, list)
@patch('ansible.utils.display.Display.display')
def test_helpdefault(self, mock_display):
cli = ConsoleCLI(['ansible test'])
cli.parse()
cli.modules = set(['copy'])
cli.helpdefault('copy')
self.assertTrue(cli.parser is not None)
self.assertTrue(len(mock_display.call_args_list) > 0,
"display.display should have been called but was not")
| gpl-3.0 |
pmalmgren/consul | vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py | 1232 | 3478 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A test calls this internally to create the golden files
# so it can process them (so we don't have to check the files in).
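# Example invocation (the destination directory is an assumption):
# python msgpack_test.py testdata /tmp/golden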
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| mpl-2.0 |
ytjiang/django | django/contrib/admin/widgets.py | 8 | 14831 | """
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.db.models.deletion import CASCADE
from django.forms.utils import flatatt
from django.forms.widgets import Media, RadioFieldRenderer
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import (
escape, format_html, format_html_join, smart_urlquote,
)
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked:
attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append('<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('SelectFilter.init("id_%s", "%s", %s); });</script>\n'
% (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked)))
return mark_safe(''.join(output))
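# Illustrative usage sketch (not part of the original file; the form field
# name, User model, and import are assumptions):
# users = forms.ModelMultipleChoiceField(
#     queryset=User.objects.all(),
#     widget=FilteredSelectMultiple('users', is_stacked=False))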
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vDateField', 'size': '10'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=[static("admin/js/%s" % path) for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vTimeField', 'size': '8'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return format_html('<p class="datetime">{} {}<br />{} {}</p>',
_('Date:'), rendered_widgets[0],
_('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return format_html('<ul{}>\n{}\n</ul>',
flatatt(self.attrs),
format_html_join('\n', '<li>{}</li>',
((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = ('<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = ('<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
"""
Converts the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters.
"""
params = {}
if lookups and hasattr(lookups, 'items'):
items = []
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
# See django.db.fields.BooleanField.get_prep_lookup
v = ('0', '1')[v]
else:
v = six.text_type(v)
items.append((k, v))
params.update(dict(items))
return params
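# Hedged example of the conversion above (the lookup keys and values are
# illustrative assumptions): booleans become '0'/'1', sequences are
# comma-joined, everything else is stringified.
# >>> url_params_from_lookup_dict({'active': True, 'pk__in': [1, 2]})
# {'active': '1', 'pk__in': '1,2'}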
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
rel_to = self.rel.to
if attrs is None:
attrs = {}
extra = []
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook.
# TODO: "lookup_id_" is hard-coded here. This should instead use
# the correct API to determine the ID dynamically.
extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" title="%s"></a>' %
(related_url, url, name, _('Lookup')))
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
if value:
output.append(self.label_for_value(value))
return mark_safe(''.join(output))
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...'))
except (ValueError, self.rel.to.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
if self.rel.to in self.admin_site._registry:
# The related object is registered with the same AdminSite
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join(force_text(v) for v in value)
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class wraps a given widget to add the add, change, and delete
icons for the admin interface.
"""
template = 'admin/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.to in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
media = Media(js=['admin/js/related-widget-wrapper.js'])
return self.widget.media + media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def render(self, name, value, *args, **kwargs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.to._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'widget': self.widget.render(name, value, *args, **kwargs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
if self.can_change_related:
change_related_template_url = self.get_related_url(info, 'change', '__fk__')
context.update(
can_change_related=True,
change_related_template_url=change_related_template_url,
)
if self.can_add_related:
add_related_url = self.get_related_url(info, 'add')
context.update(
can_add_related=True,
add_related_url=add_related_url,
)
if self.can_delete_related:
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
can_delete_related=True,
delete_related_template_url=delete_related_template_url,
)
return mark_safe(render_to_string(self.template, context))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminEmailInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.URLInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
def render(self, name, value, attrs=None):
html = super(AdminURLFieldWidget, self).render(name, value, attrs)
if value:
value = force_text(self._format_value(value))
final_attrs = {'href': smart_urlquote(value)}
html = format_html(
'<p class="url">{} <a{}>{}</a><br />{} {}</p>',
_('Currently:'), flatatt(final_attrs), value,
_('Change:'), html
)
return html
class AdminIntegerFieldWidget(forms.TextInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
final_attrs = {'class': self.class_name}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
| bsd-3-clause |
GyrosOfWar/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_extensions.py | 413 | 16128 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for extensions module."""
import unittest
import zlib
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import extensions
class ExtensionsTest(unittest.TestCase):
"""A unittest for non-class methods in extensions.py"""
def test_parse_window_bits(self):
self.assertRaises(ValueError, extensions._parse_window_bits, None)
self.assertRaises(ValueError, extensions._parse_window_bits, 'foobar')
self.assertRaises(ValueError, extensions._parse_window_bits, ' 8 ')
self.assertRaises(ValueError, extensions._parse_window_bits, 'a8a')
self.assertRaises(ValueError, extensions._parse_window_bits, '00000')
self.assertRaises(ValueError, extensions._parse_window_bits, '00008')
self.assertRaises(ValueError, extensions._parse_window_bits, '0x8')
self.assertRaises(ValueError, extensions._parse_window_bits, '9.5')
self.assertRaises(ValueError, extensions._parse_window_bits, '8.0')
self.assertTrue(extensions._parse_window_bits, '8')
self.assertTrue(extensions._parse_window_bits, '15')
self.assertRaises(ValueError, extensions._parse_window_bits, '-8')
self.assertRaises(ValueError, extensions._parse_window_bits, '0')
self.assertRaises(ValueError, extensions._parse_window_bits, '7')
self.assertRaises(ValueError, extensions._parse_window_bits, '16')
self.assertRaises(
ValueError, extensions._parse_window_bits, '10000000')
class CompressionMethodParameterParserTest(unittest.TestCase):
"""A unittest for _parse_compression_method which parses the compression
method description used by perframe-compression and permessage-compression
extension in their "method" extension parameter.
"""
def test_parse_method_simple(self):
method_list = extensions._parse_compression_method('foo')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(0, len(method.get_parameters()))
def test_parse_method_with_parameter(self):
method_list = extensions._parse_compression_method('foo; x; y=10')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(2, len(method.get_parameters()))
self.assertTrue(method.has_parameter('x'))
self.assertEqual(None, method.get_parameter_value('x'))
self.assertTrue(method.has_parameter('y'))
self.assertEqual('10', method.get_parameter_value('y'))
def test_parse_method_with_quoted_parameter(self):
method_list = extensions._parse_compression_method(
'foo; x="Hello World"; y=10')
self.assertEqual(1, len(method_list))
method = method_list[0]
self.assertEqual('foo', method.name())
self.assertEqual(2, len(method.get_parameters()))
self.assertTrue(method.has_parameter('x'))
self.assertEqual('Hello World', method.get_parameter_value('x'))
self.assertTrue(method.has_parameter('y'))
self.assertEqual('10', method.get_parameter_value('y'))
def test_parse_method_multiple(self):
method_list = extensions._parse_compression_method('foo, bar')
self.assertEqual(2, len(method_list))
self.assertEqual('foo', method_list[0].name())
self.assertEqual(0, len(method_list[0].get_parameters()))
self.assertEqual('bar', method_list[1].name())
self.assertEqual(0, len(method_list[1].get_parameters()))
def test_parse_method_multiple_methods_with_quoted_parameter(self):
method_list = extensions._parse_compression_method(
'foo; x="Hello World", bar; y=10')
self.assertEqual(2, len(method_list))
self.assertEqual('foo', method_list[0].name())
self.assertEqual(1, len(method_list[0].get_parameters()))
self.assertTrue(method_list[0].has_parameter('x'))
self.assertEqual('Hello World',
method_list[0].get_parameter_value('x'))
self.assertEqual('bar', method_list[1].name())
self.assertEqual(1, len(method_list[1].get_parameters()))
self.assertTrue(method_list[1].has_parameter('y'))
self.assertEqual('10', method_list[1].get_parameter_value('y'))
def test_create_method_desc_simple(self):
params = common.ExtensionParameter('foo')
desc = extensions._create_accepted_method_desc('foo',
params.get_parameters())
self.assertEqual('foo', desc)
def test_create_method_desc_with_parameters(self):
params = common.ExtensionParameter('foo')
params.add_parameter('x', 'Hello, World')
params.add_parameter('y', '10')
desc = extensions._create_accepted_method_desc('foo',
params.get_parameters())
self.assertEqual('foo; x="Hello, World"; y=10', desc)
class DeflateFrameExtensionProcessorParsingTest(unittest.TestCase):
"""A unittest for checking that DeflateFrameExtensionProcessor parses given
extension parameter correctly.
"""
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('deflate-frame'))
self.assertIsInstance(processor,
extensions.DeflateFrameExtensionProcessor)
processor = extensions.get_extension_processor(
common.ExtensionParameter('x-webkit-deflate-frame'))
self.assertIsInstance(processor,
extensions.DeflateFrameExtensionProcessor)
def test_minimal_offer(self):
processor = extensions.DeflateFrameExtensionProcessor(
common.ExtensionParameter('perframe-deflate'))
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(zlib.MAX_WBITS,
processor._rfc1979_deflater._window_bits)
self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_max_window_bits(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', '10')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(10, processor._rfc1979_deflater._window_bits)
def test_offer_with_out_of_range_max_window_bits(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', '0')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_max_window_bits_without_value(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('max_window_bits', None)
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_no_context_takeover(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('no_context_takeover', None)
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_no_context_takeover_with_value(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('no_context_takeover', 'foobar')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_unknown_parameter(self):
parameter = common.ExtensionParameter('perframe-deflate')
parameter.add_parameter('foo', 'bar')
processor = extensions.DeflateFrameExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('perframe-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
class PerMessageDeflateExtensionProcessorParsingTest(unittest.TestCase):
"""A unittest for checking that PerMessageDeflateExtensionProcessor parses
given extension parameter correctly.
"""
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('permessage-deflate'))
self.assertIsInstance(processor,
extensions.PerMessageDeflateExtensionProcessor)
def test_minimal_offer(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
self.assertEqual(zlib.MAX_WBITS,
processor._rfc1979_deflater._window_bits)
self.assertFalse(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('server_max_window_bits', '10')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('server_max_window_bits', '10')],
response.get_parameters())
self.assertEqual(10, processor._rfc1979_deflater._window_bits)
def test_offer_with_out_of_range_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('server_max_window_bits', '0')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_max_window_bits_without_value(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('server_max_window_bits', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_no_context_takeover(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('server_no_context_takeover', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('server_no_context_takeover', None)],
response.get_parameters())
self.assertTrue(processor._rfc1979_deflater._no_context_takeover)
def test_offer_with_no_context_takeover_with_value(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('server_no_context_takeover', 'foobar')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
def test_offer_with_unknown_parameter(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('foo', 'bar')
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
self.assertIsNone(processor.get_extension_response())
class PerMessageDeflateExtensionProcessorBuildingTest(unittest.TestCase):
"""A unittest for checking that PerMessageDeflateExtensionProcessor builds
a response based on specified options correctly.
"""
def test_response_with_max_window_bits(self):
parameter = common.ExtensionParameter('permessage-deflate')
parameter.add_parameter('client_max_window_bits', None)
processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
processor.set_client_max_window_bits(10)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('client_max_window_bits', '10')],
response.get_parameters())
def test_response_with_max_window_bits_without_client_permission(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_client_max_window_bits(10)
response = processor.get_extension_response()
self.assertIsNone(response)
def test_response_with_true_for_no_context_takeover(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_client_no_context_takeover(True)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual([('client_no_context_takeover', None)],
response.get_parameters())
def test_response_with_false_for_no_context_takeover(self):
processor = extensions.PerMessageDeflateExtensionProcessor(
common.ExtensionParameter('permessage-deflate'))
processor.set_client_no_context_takeover(False)
response = processor.get_extension_response()
self.assertEqual('permessage-deflate', response.name())
self.assertEqual(0, len(response.get_parameters()))
class PerMessageCompressExtensionProcessorTest(unittest.TestCase):
def test_registry(self):
processor = extensions.get_extension_processor(
common.ExtensionParameter('permessage-compress'))
self.assertIsInstance(processor,
extensions.PerMessageCompressExtensionProcessor)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
darthdeus/dotfiles | c_ycm_conf.py | 1 | 5178 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-std=gnu11',
'-x',
'c',
'-isystem',
'/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
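# Illustrative sketch of the path-flag handling above (the paths are
# assumptions): a flag in a path position is joined onto the working
# directory, both in two-token and fused forms.
# >>> MakeRelativePathsInFlagsAbsolute(['-I', 'include', '-Wall'], '/home/me/proj')
# ['-I', '/home/me/proj/include', '-Wall']
# >>> MakeRelativePathsInFlagsAbsolute(['-Iinclude'], '/home/me/proj')
# ['-I/home/me/proj/include']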
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| mit |
ptisserand/ansible | lib/ansible/modules/network/nxos/nxos_static_route.py | 16 | 9227 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_static_route
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
aliases:
- address
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
default: default
tag:
description:
- Route tag value (numeric) or keyword 'default'.
route_name:
description:
- Name of the route or keyword 'default'. Used with the name parameter on the CLI.
pref:
description:
- Preference or administrative difference of route (range 1-255) or keyword 'default'.
aliases:
- admin_distance
aggregate:
description: List of static route definitions
version_added: 2.5
state:
description:
- Manage the state of the resource.
choices: ['present','absent']
default: 'present'
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "3.3.3.3"
route_name: testing
pref: 100
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
'''
import re
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
def reconcile_candidate(module, candidate, prefix, w):
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
state = w['state']
set_command = set_route_command(prefix, w)
remove_command = remove_route_command(prefix, w)
parents = []
commands = []
yrc = remove_command.replace('no ', '')
if w['vrf'] == 'default':
netcfg = str(netcfg).split('\n')
ncfg = []
for line in netcfg:
# remove ip route commands of non-default vrfs from
# the running config just in case the same commands
# exist in default and non-default vrfs
if ' ip route' not in line:
ncfg.append(line)
if any(yrc in s for s in ncfg) and state == 'absent':
commands = [remove_command]
elif set_command not in ncfg and state == 'present':
if any(yrc in s for s in ncfg):
commands = [remove_command, set_command]
else:
commands = [set_command]
else:
parents = ['vrf context {0}'.format(w['vrf'])]
config = netcfg.get_section(parents)
if not isinstance(config, list):
config = config.split('\n')
config = [line.strip() for line in config]
if any(yrc in s for s in config) and state == 'absent':
commands = [remove_command]
elif set_command not in config and state == 'present':
if any(yrc in s for s in config):
commands = [remove_command, set_command]
else:
commands = [set_command]
if commands:
candidate.add(commands, parents=parents)
def remove_route_command(prefix, w):
return 'no ip route {0} {1}'.format(prefix, w['next_hop'])
def set_route_command(prefix, w):
route_cmd = 'ip route {0} {1}'.format(prefix, w['next_hop'])
if w['route_name'] and w['route_name'] != 'default':
route_cmd += ' name {0}'.format(w['route_name'])
if w['tag']:
if w['tag'] != 'default' and w['tag'] != '0':
route_cmd += ' tag {0}'.format(w['tag'])
if w['pref'] and w['pref'] != 'default':
route_cmd += ' {0}'.format(w['pref'])
return route_cmd
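# Illustrative sketch (values taken from the EXAMPLES/RETURN sections above):
# >>> set_route_command('192.168.20.0/24',
# ...     {'next_hop': '3.3.3.3', 'route_name': 'testing', 'tag': None, 'pref': '100'})
# 'ip route 192.168.20.0/24 3.3.3.3 name testing 100'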
def get_dotted_mask(mask):
bits = 0
for i in range(32 - mask, 32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff)))
return mask
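# Hedged example of the prefix-length-to-dotted-mask expansion above:
# >>> get_dotted_mask(24)
# '255.255.255.0'
# >>> get_dotted_mask(16)
# '255.255.0.0'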
def get_network_start(address, netmask):
address = address.split('.')
netmask = netmask.split('.')
return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)]
def network_from_string(address, mask, module):
octects = address.split('.')
if len(octects) > 4:
module.fail_json(msg='Incorrect address format.', address=address)
for octect in octects:
try:
if int(octect) < 0 or int(octect) > 255:
module.fail_json(msg='Address may contain invalid values.',
address=address)
except ValueError:
module.fail_json(msg='Address may contain non-integer values.',
address=address)
try:
if int(mask) < 0 or int(mask) > 32:
module.fail_json(msg='Incorrect mask value.', mask=mask)
except ValueError:
module.fail_json(msg='Mask may contain non-integer values.', mask=mask)
netmask = get_dotted_mask(int(mask))
return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
splitted_prefix = prefix.split('/')
address = splitted_prefix[0]
if len(splitted_prefix) > 2:
module.fail_json(msg='Incorrect address format.', address=address)
elif len(splitted_prefix) == 2:
mask = splitted_prefix[1]
network = network_from_string(address, mask, module)
normalized_prefix = str(network) + '/' + str(mask)
else:
normalized_prefix = prefix + '/' + str(32)
return normalized_prefix
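# Illustrative sketch (the module argument is only used for error reporting;
# the addresses are assumptions): host bits are masked off, and a bare
# address gets an implicit /32.
# >>> normalize_prefix(module, '192.168.20.64/24')
# '192.168.20.0/24'
# >>> normalize_prefix(module, '10.1.1.1')
# '10.1.1.1/32'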
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
obj.append(d)
else:
obj.append({
'prefix': module.params['prefix'],
'next_hop': module.params['next_hop'],
'vrf': module.params['vrf'],
'tag': module.params['tag'],
'route_name': module.params['route_name'],
'pref': module.params['pref'],
'state': module.params['state']
})
return obj
def main():
element_spec = dict(
prefix=dict(type='str', aliases=['address']),
next_hop=dict(type='str'),
vrf=dict(type='str', default='default'),
tag=dict(type='str'),
route_name=dict(type='str'),
pref=dict(type='str', aliases=['admin_distance']),
state=dict(choices=['absent', 'present'], default='present'),
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
aggregate_spec['next_hop'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec)
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
warnings = list()
result = {'changed': False, 'commands': []}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
for w in want:
prefix = normalize_prefix(module, w['prefix'])
candidate = CustomNetworkConfig(indent=3)
reconcile_candidate(module, candidate, prefix, w)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['commands'].extend(candidate)
result['changed'] = True
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ElvishArtisan/drouter | python/examples/show_destinations.py | 1 | 1976 | #!%PYTHON_BANGPATH%
# show_destinations.py
#
# Drouter state script to enumerate all destinations
#
# (C) Copyright 2018-2019 Fred Gleason <fredg@paravelsystems.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import Drouter.StateEngine
# ############################################################################
#
# Callbacks
#
# These are called by the 'StateEngine' object in response to specific events.
#
#
# Called immediately after the 'StateEngine' object has completed
# initialization. This is the place to do any needed startup initialization
# (create objects, open connections, etc).
#
# For this script, we simply enumerate all destinations, then exit
#
def EngineReady(engine,priv):
destinations=engine.Destinations()
for destination in destinations:
print("*********************************")
print(destination)
print("*********************************")
exit(0)
# ############################################################################
#
# Event Loop
#
# Create a 'StateEngine' object to talk to the drouter service.
#
engine=Drouter.StateEngine.StateEngine()
#
# Set the "ready" callback so we receive notification when the engine
# has completed initialization.
#
engine.setReadyCallback(EngineReady)
#
# Start the engine, giving the hostname/address of the Drouter service.
#
engine.start("localhost")
| gpl-2.0 |
Boussadia/weboob | modules/fortuneo/backend.py | 2 | 2415 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Gilles-Alexandre Quenot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bank import ICapBank, AccountNotFound
from weboob.tools.backend import BaseBackend, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import Fortuneo
__all__ = ['FortuneoBackend']
class FortuneoBackend(BaseBackend, ICapBank):
NAME = 'fortuneo'
MAINTAINER = u'Gilles-Alexandre Quenot'
EMAIL = 'gilles.quenot@gmail.com'
VERSION = '0.i'
LICENSE = 'AGPLv3+'
DESCRIPTION = u'Fortuneo'
CONFIG = BackendConfig(
ValueBackendPassword(
'login',
label='Identifiant',
masked=False,
required=True
),
ValueBackendPassword(
'password',
label='Mot de passe',
required=True
)
)
BROWSER = Fortuneo
def create_default_browser(self):
return self.create_browser(
self.config['login'].get(),
self.config['password'].get()
)
def iter_accounts(self):
"""Iter accounts"""
for account in self.browser.get_accounts_list():
yield account
def get_account(self, _id):
with self.browser:
account = self.browser.get_account(_id)
if account:
return account
else:
raise AccountNotFound()
def iter_history(self, account):
"""Iter history of transactions on a specific account"""
with self.browser:
for history in self.browser.get_history(account):
yield history
# vim:ts=4:sw=4
| agpl-3.0 |
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/numpy/linalg/linalg.py | 11 | 75845 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
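# Hedged example of the precision promotion above, using the scalar types
# imported at the top of this module: the computation type is always double
# (or cdouble), while the result type tracks the input precision.
# >>> _commonType(zeros(2, single))
# (double, single)
# >>> _commonType(zeros(2, csingle))
# (cdouble, csingle)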
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
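A further, illustrative example (not in the original docs) showing the
broadcast path, where `b` is a stack of right-hand-side vectors:
>>> a = np.array([[[1., 0.], [0., 1.]], [[2., 0.], [0., 2.]]])
>>> b = np.array([[1., 2.], [2., 4.]])
>>> np.linalg.solve(a, b)
array([[ 1., 2.],
[ 1., 2.]])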
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
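As an illustrative aside (not in the original docs), the factor can be used
to solve ``A x = b`` via the two triangular solves described in the Notes
(plain `solve` is used here for brevity):
>>> b = np.array([1+0j, 2+0j])
>>> x = np.linalg.solve(L.conj().T, np.linalg.solve(L, b))
>>> np.allclose(np.dot(A, x), b)
True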
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
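An illustrative check (not in the original docs) of the shapes returned by
``mode='complete'``:
>>> qc, rc = np.linalg.qr(a, mode='complete')
>>> qc.shape, rc.shape
((9, 9), (9, 6))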
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
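Illustrative aside (not in the original docs): when `a` is genuinely
Hermitian, using the upper triangle gives the same result:
>>> LA.eigvalsh(a, UPLO='U')
array([ 0.17157288, 5.82842712])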
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs.
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
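Singular values only, as an illustrative extra (not in the original docs);
they match those from the reduced decomposition above:
>>> s2 = np.linalg.svd(a, full_matrices=False, compute_uv=False)
>>> np.allclose(s, s2)
True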
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]_. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2]_.
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
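Illustrative extra (not in the original docs): `tol` controls which singular
values are treated as zero:
>>> J = np.diag([1., 1e-10])
>>> matrix_rank(J), matrix_rank(J, tol=1e-8)
(2, 1)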
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, inexact):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
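As an illustrative sanity check (not in the original docs), the result
matches a plain left-to-right reduction:
>>> from functools import reduce
>>> A = np.ones((10, 100)); B = np.ones((100, 5)); C = np.ones((5, 50))
>>> np.allclose(multi_dot([A, B, C]), reduce(np.dot, [A, B, C]))
True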
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
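# Editorial note (illustrative): for the docstring example A_{10x100},
# B_{100x5}, C_{5x50}, p = [10, 100, 5, 50] and the result has s[0, 2] == 1,
# i.e. split after B: compute (A B) first, then multiply by C — the cheaper
# parenthesization from the cost table in multi_dot's docstring.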
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
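# Hedged usage sketch of the private helpers above (illustrative only;
# assumes ``np`` is numpy and that adjacent shapes chain correctly):
#
#     mats = [np.random.random(s) for s in [(10, 100), (100, 5), (5, 50)]]
#     order = _multi_dot_matrix_chain_order(mats)
#     out = _multi_dot(mats, order, 0, len(mats) - 1)
#     # same value as np.dot(np.dot(mats[0], mats[1]), mats[2]), fewer flops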
| mit |
rbrito/pkg-youtube-dl | youtube_dl/extractor/bellmedia.py | 8 | 2971 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class BellMediaIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?
(?P<domain>
(?:
ctv|
tsn|
bnn(?:bloomberg)?|
thecomedynetwork|
discovery|
discoveryvelocity|
sciencechannel|
investigationdiscovery|
animalplanet|
bravo|
mtv|
space|
etalk|
marilyn
)\.ca|
(?:much|cp24)\.com
)/.*?(?:\b(?:vid(?:eoid)?|clipId)=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
_TESTS = [{
'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070',
'md5': '36d3ef559cfe8af8efe15922cd3ce950',
'info_dict': {
'id': '1403070',
'ext': 'flv',
'title': 'David Cockfield\'s Top Picks',
'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
'upload_date': '20180525',
'timestamp': 1527288600,
},
}, {
'url': 'http://www.thecomedynetwork.ca/video/player?vid=923582',
'only_matching': True,
}, {
'url': 'http://www.tsn.ca/video/expectations-high-for-milos-raonic-at-us-open~939549',
'only_matching': True,
}, {
'url': 'http://www.bnn.ca/video/berman-s-call-part-two-viewer-questions~939654',
'only_matching': True,
}, {
'url': 'http://www.ctv.ca/YourMorning/Video/S1E6-Monday-August-29-2016-vid938009',
'only_matching': True,
}, {
'url': 'http://www.much.com/shows/atmidnight/episode948007/tuesday-september-13-2016',
'only_matching': True,
}, {
'url': 'http://www.much.com/shows/the-almost-impossible-gameshow/928979/episode-6',
'only_matching': True,
}, {
'url': 'http://www.ctv.ca/DCs-Legends-of-Tomorrow/Video/S2E11-Turncoat-vid1051430',
'only_matching': True,
}, {
'url': 'http://www.etalk.ca/video?videoid=663455',
'only_matching': True,
}, {
'url': 'https://www.cp24.com/video?clipId=1982548',
'only_matching': True,
}]
_DOMAINS = {
'thecomedynetwork': 'comedy',
'discoveryvelocity': 'discvel',
'sciencechannel': 'discsci',
'investigationdiscovery': 'invdisc',
'animalplanet': 'aniplan',
'etalk': 'ctv',
'bnnbloomberg': 'bnn',
'marilyn': 'ctv_marilyn',
}
def _real_extract(self, url):
domain, video_id = re.match(self._VALID_URL, url).groups()
domain = domain.split('.')[0]
return {
'_type': 'url_transparent',
'id': video_id,
'url': '9c9media:%s_web:%s' % (self._DOMAINS.get(domain, domain), video_id),
'ie_key': 'NineCNineMedia',
}
| unlicense |
patrioticcow/MessagesForSkype | packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/bisect.py | 91 | 2394 | """Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
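# Hedged illustration of the right/left distinction (not in the original
# module; values chosen only for the example):
#
#     >>> a = [1, 2, 2, 3]
#     >>> bisect_right(a, 2), bisect_left(a, 2)
#     (3, 1)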
# Overwrite above definitions with a fast C implementation
try:
from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect
except ImportError:
pass
| mit |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/django/db/backends/sqlite3/features.py | 119 | 2631 | from __future__ import unicode_literals
import sys
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def can_release_savepoints(self):
return self.uses_savepoints
@cached_property
def can_share_in_memory_db(self):
return (
sys.version_info[:2] >= (3, 4) and
Database.__name__ == 'sqlite3.dbapi2' and
Database.sqlite_version_info >= (3, 7, 13)
)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_expression_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
| mit |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py | 319 | 1348 | from socket import error as SocketError
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
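# Hedged usage sketch (assumes an httplib-style connection object named
# ``conn``; not part of the original module):
#
#     if is_connection_dropped(conn):
#         conn.close()  # recycle rather than reuse a dead socket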
| mit |
blooparksystems/odoo | addons/account_check_printing/account_payment.py | 37 | 7203 | # -*- coding: utf-8 -*-
import math
from openerp import models, fields, api, _
from openerp.tools import amount_to_text_en, float_round
from openerp.exceptions import UserError, ValidationError
class account_register_payments(models.TransientModel):
_inherit = "account.register.payments"
check_amount_in_words = fields.Char(string="Amount in Words")
check_manual_sequencing = fields.Boolean(related='journal_id.check_manual_sequencing')
# Note: a check_number == 0 means that it will be attributed when the check is printed
check_number = fields.Integer(string="Check Number", readonly=True, copy=False, default=0,
help="Number of the check corresponding to this payment. If your pre-printed check are not already numbered, "
"you can manage the numbering in the journal configuration page.")
@api.onchange('journal_id')
def _onchange_journal_id(self):
if hasattr(super(account_register_payments, self), '_onchange_journal_id'):
super(account_register_payments, self)._onchange_journal_id()
if self.journal_id.check_manual_sequencing:
self.check_number = self.journal_id.check_sequence_id.number_next_actual
@api.onchange('amount')
def _onchange_amount(self):
if hasattr(super(account_register_payments, self), '_onchange_amount'):
super(account_register_payments, self)._onchange_amount()
# TODO: merge, refactor and complete the amount_to_text and amount_to_text_en classes
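        # Hedged illustration: an amount of 95.75 would render roughly as
        # "Ninety-Five and 75/100"; the exact wording comes from amount_to_text_en.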
check_amount_in_words = amount_to_text_en.amount_to_text(math.floor(self.amount), lang='en', currency='')
check_amount_in_words = check_amount_in_words.replace(' and Zero Cent', '') # Ugh
decimals = self.amount % 1
if decimals >= 10**-2:
check_amount_in_words += _(' and %s/100') % str(int(round(float_round(decimals*100, precision_rounding=1))))
self.check_amount_in_words = check_amount_in_words
def get_payment_vals(self):
res = super(account_register_payments, self).get_payment_vals()
if self.payment_method_id == self.env.ref('account_check_printing.account_payment_method_check'):
res.update({
'check_amount_in_words': self.check_amount_in_words,
'check_manual_sequencing': self.check_manual_sequencing,
})
return res
class account_payment(models.Model):
_inherit = "account.payment"
check_amount_in_words = fields.Char(string="Amount in Words")
check_manual_sequencing = fields.Boolean(related='journal_id.check_manual_sequencing')
check_number = fields.Integer(string="Check Number", readonly=True, copy=False,
help="The selected journal is configured to print check numbers. If your pre-printed check paper already has numbers "
"or if the current numbering is wrong, you can change it in the journal configuration page.")
@api.onchange('journal_id')
def _onchange_journal_id(self):
if hasattr(super(account_payment, self), '_onchange_journal_id'):
super(account_payment, self)._onchange_journal_id()
if self.journal_id.check_manual_sequencing:
self.check_number = self.journal_id.check_sequence_id.number_next_actual
@api.onchange('amount')
def _onchange_amount(self):
if hasattr(super(account_payment, self), '_onchange_amount'):
super(account_payment, self)._onchange_amount()
check_amount_in_words = amount_to_text_en.amount_to_text(math.floor(self.amount), lang='en', currency='')
check_amount_in_words = check_amount_in_words.replace(' and Zero Cent', '') # Ugh
decimals = self.amount % 1
if decimals >= 10**-2:
check_amount_in_words += _(' and %s/100') % str(int(round(float_round(decimals*100, precision_rounding=1))))
self.check_amount_in_words = check_amount_in_words
def _check_communication(self, payment_method_id, communication):
super(account_payment, self)._check_communication(payment_method_id, communication)
if payment_method_id == self.env.ref('account_check_printing.account_payment_method_check').id:
if not communication:
return
if len(communication) > 60:
raise ValidationError(_("A check memo cannot exceed 60 characters."))
@api.model
def create(self, vals):
if vals['payment_method_id'] == self.env.ref('account_check_printing.account_payment_method_check').id\
and vals.get('check_manual_sequencing'):
sequence = self.env['account.journal'].browse(vals['journal_id']).check_sequence_id
vals.update({'check_number': sequence.next_by_id()})
return super(account_payment, self.sudo()).create(vals)
@api.multi
def print_checks(self):
""" Check that the recordset is valid, set the payments state to sent and call print_checks() """
# Since this method can be called via a client_action_multi, we need to make sure the received records are what we expect
self = self.filtered(lambda r: r.payment_method_id.code == 'check_printing' and r.state != 'reconciled')
if len(self) == 0:
raise UserError(_("Payments to print as a checks must have 'Check' selected as payment method and "
"not have already been reconciled"))
if any(payment.journal_id != self[0].journal_id for payment in self):
raise UserError(_("In order to print multiple checks at once, they must belong to the same bank journal."))
self.filtered(lambda r: r.state == 'draft').post()
self.write({'state': 'sent'})
if not self[0].journal_id.check_manual_sequencing:
            # The wizard asks for the number printed on the first pre-printed check
            # so payments are attributed the number of the check they'll be printed on.
last_printed_check = self.search([
('journal_id', '=', self[0].journal_id.id),
('check_number', '!=', 0)], order="check_number desc", limit=1)
next_check_number = last_printed_check and last_printed_check.check_number + 1 or 1
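            # Hedged illustration: if the last printed check was number 42,
            # the next one offered is 43; with no prior checks it starts at 1.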
return {
'name': _('Print Pre-numbered Checks'),
'type': 'ir.actions.act_window',
'res_model': 'print.prenumbered.checks',
'view_type': 'form',
'view_mode': 'form',
'target': 'new',
'context': {
'payment_ids': self.ids,
'default_next_check_number': next_check_number,
}
}
else:
return self.do_print_checks()
@api.multi
def unmark_sent(self):
self.write({'state': 'posted'})
@api.multi
def do_print_checks(self):
""" This method is a hook for l10n_xx_check_printing modules to implement actual check printing capabilities """
raise UserError(_("There is no check layout configured.\nMake sure the proper check printing module is installed"
" and its configuration (in company settings > 'Configuration' tab) is correct."))
| gpl-3.0 |
ThinkOpen-Solutions/odoo | addons/mrp_repair/wizard/make_invoice.py | 172 | 3150 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class make_invoice(osv.osv_memory):
_name = 'mrp.repair.make_invoice'
_description = 'Make Invoice'
_columns = {
'group': fields.boolean('Group by partner invoice address'),
}
def make_invoices(self, cr, uid, ids, context=None):
""" Generates invoice(s) of selected records.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: Loads the view of new invoice(s).
"""
if context is None:
context = {}
inv = self.browse(cr, uid, ids[0], context=context)
order_obj = self.pool.get('mrp.repair')
mod_obj = self.pool.get('ir.model.data')
newinv = order_obj.action_invoice_create(cr, uid, context['active_ids'],
                group=inv.group, context=context)
# We have to trigger the workflow of the given repairs, otherwise they remain 'to be invoiced'.
# Note that the signal 'action_invoice_create' will trigger another call to the method 'action_invoice_create',
# but that second call will not do anything, since the repairs are already invoiced.
order_obj.signal_workflow(cr, uid, context['active_ids'], 'action_invoice_create')
form_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
form_id = form_res and form_res[1] or False
tree_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False
return {
'domain': [('id','in', newinv.values())],
'name': 'Invoices',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'views': [(tree_id, 'tree'),(form_id, 'form')],
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lavalamp/test-infra | experiment/flakedetector.py | 11 | 3269 | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counts the number of flakes in PRs using data from prow.
A flake is counted if a job passes and fails for the same pull commit. This
isn't a perfect signal, since something might have happened on master that
makes it flake, but I think it's good enough. There will also be false
negatives when flakes don't show up because the PR author changed the PR.
Still, this is a good signal.
Only serial jobs are considered for the flake calculation, batch jobs are
ignored.
"""
from __future__ import print_function
import operator
import requests
def main(): # pylint: disable=too-many-branches
"""Run flake detector."""
res = requests.get('https://prow.k8s.io/data.js')
job_results = res.json()
jobs = {} # job -> {sha -> [results...]}
commits = {} # sha -> {job -> [results...]}
for res in job_results:
if res['type'] != 'presubmit':
continue
if res['repo'] != 'kubernetes/kubernetes':
continue
if res['state'] != 'success' and res['state'] != 'failure':
continue
# populate jobs
if res['job'] not in jobs:
jobs[res['job']] = {}
if res['pull_sha'] not in jobs[res['job']]:
jobs[res['job']][res['pull_sha']] = []
jobs[res['job']][res['pull_sha']].append(res['state'])
# populate commits
if res['pull_sha'] not in commits:
commits[res['pull_sha']] = {}
if res['job'] not in commits[res['pull_sha']]:
commits[res['pull_sha']][res['job']] = []
commits[res['pull_sha']][res['job']].append(res['state'])
job_commits = {}
job_flakes = {}
for job, shas in jobs.items():
job_commits[job] = len(shas)
job_flakes[job] = 0
for results in shas.values():
if 'success' in results and 'failure' in results:
job_flakes[job] += 1
print('Certain flakes:')
for job, flakes in sorted(job_flakes.items(), key=operator.itemgetter(1), reverse=True):
if job_commits[job] < 10:
continue
fail_chance = flakes / job_commits[job]
print('{}/{}\t({:.0f}%)\t{}'.format(flakes, job_commits[job], 100*fail_chance, job))
# for each commit, flaked iff exists job that flaked
flaked = 0
    for _, commit_jobs in commits.items():
        for job, results in commit_jobs.items():
if 'success' in results and 'failure' in results:
flaked += 1
break
print('Commits that flaked (passed and failed some job): %d/%d %.2f%%' %
(flaked, len(commits), (flaked*100.0)/len(commits)))
if __name__ == '__main__':
main()
| apache-2.0 |
jnerin/ansible | lib/ansible/module_utils/facts/virtual/linux.py | 1 | 10826 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
virtual_facts = {}
# lxc/docker
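        # A matching cgroup line looks like (hedged example, id shortened):
        #   12:pids:/docker/0123456789abcdef  or  .../docker-<id>.scope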
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
virtual_facts['virtualization_type'] = 'docker'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ'):
if re.search('container=lxc', line):
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
virtual_facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
virtual_facts['virtualization_role'] = 'host'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
virtual_facts['virtualization_type'] = systemd_container
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists("/proc/xen"):
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
virtual_facts['virtualization_role'] = 'host'
except IOError:
pass
return virtual_facts
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if product_name == 'RHEV Hypervisor':
virtual_facts['virtualization_type'] = 'RHEV'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if product_name in ['VMware Virtual Platform', 'VMware7,1']:
virtual_facts['virtualization_type'] = 'VMware'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if product_name == 'OpenStack Nova':
virtual_facts['virtualization_type'] = 'openstack'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if bios_vendor == 'innotek GmbH':
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
virtual_facts['virtualization_type'] = 'VirtualPC'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if sys_vendor == 'Parallels Software International Inc.':
virtual_facts['virtualization_type'] = 'parallels'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if sys_vendor == 'QEMU':
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if sys_vendor == 'oVirt':
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if sys_vendor == 'OpenStack Foundation':
virtual_facts['virtualization_type'] = 'openstack'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match(r'^VxID: \d+', line):
virtual_facts['virtualization_type'] = 'linux_vserver'
if re.match(r'^VxID: 0', line):
virtual_facts['virtualization_role'] = 'host'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*PowerVM Lx86', line):
virtual_facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
virtual_facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.splitlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
virtual_facts['virtualization_type'] = data[1].strip()
else:
virtual_facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if virtual_facts['virtualization_type'] == 'PR/SM':
virtual_facts['virtualization_role'] = 'LPAR'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
if os.path.isdir('/rhev/'):
                    # Check whether this is a RHEV hypervisor (is vdsm running?)
for f in glob.glob('/proc/[0-9]*/comm'):
try:
if open(f).read().rstrip() == 'vdsm':
virtual_facts['virtualization_type'] = 'RHEV'
break
                        except Exception:
pass
else:
virtual_facts['virtualization_type'] = 'kvm'
else:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'host'
return virtual_facts
if 'vboxdrv' in modules:
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'host'
return virtual_facts
if 'virtio' in modules:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# In older Linux Kernel versions, /sys filesystem is not available
# dmidecode is the safest option to parse virtualization related values
dmi_bin = self.module.get_bin_path('dmidecode')
(rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
if rc == 0:
# Strip out commented lines (specific dmidecode output)
vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
if vendor_name in ['VMware Virtual Platform', 'VMware7,1']:
virtual_facts['virtualization_type'] = 'VMware'
virtual_facts['virtualization_role'] = 'guest'
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
virtual_facts['virtualization_type'] = 'NA'
virtual_facts['virtualization_role'] = 'NA'
return virtual_facts
class LinuxVirtualCollector(VirtualCollector):
_fact_class = LinuxVirtual
_platform = 'Linux'
| gpl-3.0 |
psi4/mongo_qcdb | qcfractal/tests/test_client.py | 2 | 4656 | """
Tests the interface portal adapter to the REST API
"""
import numpy as np
import pytest
import qcfractal.interface as ptl
from qcfractal.testing import test_server
# All tests should import test_server, but not use it
# Make PyTest aware that this module needs the server
valid_encodings = ["json", "json-ext", "msgpack-ext"]
@pytest.mark.parametrize("encoding", valid_encodings)
def test_client_molecule(test_server, encoding):
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
water = ptl.data.get_molecule("water_dimer_minima.psimol")
water.geometry[:] += np.random.random(water.geometry.shape)
# Test add
ret = client.add_molecules([water])
# Test get
get_mol = client.query_molecules(id=ret[0])
assert water.compare(get_mol[0])
# Test molecular_formula get
get_mol = client.query_molecules(molecular_formula="H4O2")
assert len(get_mol)
@pytest.mark.parametrize("encoding", valid_encodings)
def test_client_keywords(test_server, encoding):
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
opt = ptl.models.KeywordSet(values={"one": "fish", "two": encoding})
# Test add
ret = client.add_keywords([opt])
# Test get
get_kw = client.query_keywords([ret[0]])
assert opt == get_kw[0]
get_kw = client.query_keywords(hash_index=[opt.hash_index])
assert opt == get_kw[0]
@pytest.mark.parametrize("encoding", valid_encodings)
def test_client_duplicate_keywords(test_server, encoding):
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
key_name = f"key-{encoding}"
opt1 = ptl.models.KeywordSet(values={key_name: 1})
opt2 = ptl.models.KeywordSet(values={key_name: 2})
opt3 = ptl.models.KeywordSet(values={key_name: 3})
# Test add
ret = client.add_keywords([opt1, opt1])
assert len(ret) == 2
assert ret[0] == ret[1]
ret2 = client.add_keywords([opt1])
assert len(ret2) == 1
assert ret2[0] == ret[0]
ret3 = client.add_keywords([opt2, opt1, opt3])
assert len(ret3) == 3
assert ret3[1] == ret[0]
@pytest.mark.parametrize("encoding", valid_encodings)
def test_empty_query(test_server, encoding):
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
with pytest.raises(IOError) as error:
client.query_procedures(limit=1)
assert "ID is required" in str(error.value)
@pytest.mark.parametrize("encoding", valid_encodings)
def test_collection_portal(test_server, encoding):
db_name = f"Torsion123-{encoding}"
db = {
"collection": "torsiondrive",
"name": db_name,
"something": "else",
"array": ["12345"],
"visibility": True,
"view_available": False,
"group": "default",
}
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
# Test add
ret = client.add_collection(db, full_return=True)
print(ret)
# Test get
get_db = client.get_collection(db["collection"], db["name"], full_return=True)
db_id = get_db.data[0].pop("id")
# got a default values when created
get_db.data[0].pop("tags", None)
get_db.data[0].pop("tagline", None)
get_db.data[0].pop("provenance", None)
get_db.data[0].pop("view_url_hdf5", None)
get_db.data[0].pop("view_url_plaintext", None)
get_db.data[0].pop("view_metadata", None)
get_db.data[0].pop("description", None)
assert db == get_db.data[0]
# Test add w/o overwrite
ret = client.add_collection(db, full_return=True)
assert ret.meta.success is False
# Test that client is smart enough to trap non-id'ed overwrites
with pytest.raises(KeyError):
_ = client.add_collection(db, overwrite=True)
# Test that we cannot use a local key
db["id"] = "local"
db["array"] = ["6789"]
with pytest.raises(KeyError):
_ = client.add_collection(db, overwrite=True)
# Finally test that we can overwrite
db["id"] = db_id
r = client.add_collection(db, overwrite=True)
get_db = client.get_collection(db["collection"], db["name"], full_return=True)
assert get_db.data[0]["array"] == ["6789"]
@pytest.mark.parametrize("encoding", valid_encodings)
def test_custom_queries(test_server, encoding):
""" Test the round trip between client and server in custom queries"""
client = ptl.FractalClient(test_server)
client._set_encoding(encoding)
# Dummy test, not found
ret = client.custom_query("optimization", "final_result", {"optimization_ids": [1]}, full_return=True)
assert ret.meta.success
assert ret.meta.n_found == 0
| bsd-3-clause |
aselle/tensorflow | tensorflow/contrib/tpu/python/profiler/__init__.py | 33 | 1246 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Classes for TPU trace events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.contrib.tpu.profiler.tpu_profiler_analysis_pb2 import *
from tensorflow.contrib.tpu.profiler.trace_events_pb2 import *
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['Trace', 'Resource', 'Device', 'TraceEvent']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
ecederstrand/django | tests/forms_tests/tests/tests.py | 89 | 16458 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import (
CharField, FileField, Form, ModelChoiceField, ModelForm,
)
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from django.utils import six
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, Group, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ['multi_choice']
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_string_w_none']
class FileForm(Form):
file1 = FileField()
class TestTicket12510(TestCase):
''' It is not necessary to generate choices for ModelChoiceField (regression test for #12510). '''
def setUp(self):
self.groups = [Group.objects.create(name=name) for name in 'abc']
def test_choices_not_fetched_when_not_rendering(self):
# only one query is required to pull the model from DB
with self.assertNumQueries(1):
field = ModelChoiceField(Group.objects.order_by('-name'))
self.assertEqual('a', field.clean(self.groups[0].pk).name)
class TestTicket14567(TestCase):
"""
Check that the return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
option = ChoiceOptionModel.objects.create(name='default')
form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
self.assertTrue(form.is_valid())
# Check that the empty value is a QuerySet
self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
option = ChoiceOptionModel.objects.create(name='default')
choices = list(ChoiceFieldForm().fields['choice'].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, six.text_type(option)))
def test_callable_initial_value(self):
"The initial value for a callable default returning a queryset is the pk (refs #13769)"
ChoiceOptionModel.objects.create(id=1, name='default')
ChoiceOptionModel.objects.create(id=2, name='option 2')
ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /></p>"""
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm(initial={
'choice': obj2,
'choice_int': obj2,
'multi_choice': [obj2, obj3],
'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
}).as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /></p>"""
)
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with unicode filename and data #########################
file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))
f = FileForm(data={}, files={'file1': file1}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn('file1', f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data['file1'])
self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
m.delete()
def test_boundary_conditions(self):
# Boundary conditions on a PostitiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = '__all__'
f = BoundaryForm({'positive_integer': 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# Formfield initial values ########
# If the model has default values for some fields, they are used as the formfield
# initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = '__all__'
self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
self.assertEqual(DefaultsForm().fields['value'].initial, 42)
r1 = DefaultsForm()['callable_default'].as_widget()
r2 = DefaultsForm()['callable_default'].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial['name'], 'instance value')
self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial['value'], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ['name', 'callable_default']
f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Hello')
obj = f.save()
self.assertEqual(obj.name, 'class default value')
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
Test for issue 10405
"""
class A(models.Model):
ref = models.ForeignKey("B", models.CASCADE)
class Meta:
model = A
fields = '__all__'
self.assertRaises(ValueError, ModelFormMetaclass, str('Form'), (ModelForm,), {'Meta': Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
Test for issue 10405
"""
class C(models.Model):
ref = models.ForeignKey("D", models.CASCADE)
class D(models.Model):
pass
class Meta:
model = C
fields = '__all__'
self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
initial = {
'choice': opt1,
'choice_int': opt1,
}
data = {
'choice': opt2.pk,
'choice_int': opt2.pk,
'multi_choice': 'string data!',
'multi_choice_int': [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice = instance.multi_choice_int = [opt2, opt3]
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
form.save()
self.assertEqual(form.instance.choice.pk, data['choice'])
self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_save_empty_label_forms(self):
# Test that saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
(EmptyIntegerLabelChoiceForm, 'choice_integer', None),
(EmptyCharLabelChoiceForm, 'choice', ''),
]
for form, key, expected in tests:
f = form({'name': 'some-key', key: ''})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual('No Preference',
getattr(m, 'get_{}_display'.format(key))())
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual('No Preference', m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name='none-test', choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="none-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
foo_model = ChoiceModel(name='foo-test', choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="foo-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected="selected">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
| bsd-3-clause |
hradec/gaffer | python/GafferVDBUI/SphereLevelSetUI.py | 8 | 2544 | ##########################################################################
#
# Copyright (c) 2020, Don Boogert. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Don Boogert nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferUI
import GafferVDB
GafferUI.Metadata.registerNode(
GafferVDB.SphereLevelSet,
'description',
"""Creates a sphere level set.""",
plugs={
'grid' : [
'description',
"""
			The name of the sphere level set grid in the created VDB object.
"""
],
"radius" : [
"description",
"""
Sphere radius in object space units.
"""
],
"center" : [
"description",
"""
Local center of the sphere level set in object space.
"""
],
"voxelSize" : [
"description",
"""
			Size of the voxels in the created sphere level set. Smaller voxels result in more detail but higher memory usage.
"""
],
"halfWidth" : [
"description",
"""
Width of the signed distance field in voxels.
"""
],
}
)
| bsd-3-clause |
isghe/cjdns | node_build/dependencies/libuv/build/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
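# Hedged illustration of the shapes BuildProject expects (invented names):
#
#   projects = {'base': ['base', '{GUID-1}', 'base.vcproj'],
#               'app': ['app', '{GUID-2}', 'app.vcproj']}
#   deps = {'base': [], 'app': ['base']}
#   BuildProject('app', [], projects, deps)  # prints "base" then "app"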
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
ehudmagal/robotqcapp | boto/ec2/autoscale/activity.py | 57 | 3059 | # Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
class Activity(object):
def __init__(self, connection=None):
self.connection = connection
self.start_time = None
self.end_time = None
self.activity_id = None
self.progress = None
self.status_code = None
self.cause = None
self.description = None
self.status_message = None
self.group_name = None
def __repr__(self):
        return 'Activity<%s>: For group:%s, status:%s, cause:%s' % (self.activity_id,
self.group_name,
self.status_message,
self.cause)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ActivityId':
self.activity_id = value
elif name == 'AutoScalingGroupName':
self.group_name = value
elif name == 'StartTime':
try:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'EndTime':
try:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'Progress':
self.progress = value
elif name == 'Cause':
self.cause = value
elif name == 'Description':
self.description = value
elif name == 'StatusMessage':
self.status_message = value
elif name == 'StatusCode':
self.status_code = value
else:
setattr(self, name, value)
| bsd-3-clause |
roadmapper/ansible | test/units/modules/network/cnos/test_cnos_l2_interface.py | 23 | 5128 | #
# (c) 2018 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_l2_interface
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosL2InterfaceModule(TestCnosModule):
module = cnos_l2_interface
def setUp(self):
super(TestCnosL2InterfaceModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.cnos.cnos_l2_interface.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.cnos.cnos_l2_interface.load_config'
)
self._patch_run_commands = patch(
'ansible.modules.network.cnos.cnos_l2_interface.run_commands'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
self._run_commands = self._patch_run_commands.start()
self._run_commands.side_effect = self.run_commands_load_fixtures
def run_commands_load_fixtures(self, module, commands, *args, **kwargs):
return self.load_fixtures(
commands,
destination=self._run_commands,
return_values=True
)
def tearDown(self):
super(TestCnosL2InterfaceModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
self._patch_run_commands.stop()
def load_fixtures(self, commands=None,
destination=None, return_values=False):
side_effects = []
if not destination:
destination = self._get_config
if not commands:
commands = ['cnos_config_config.cfg']
for command in commands:
filename = str(command).replace(' ', '_')
filename = str(filename).replace('/', '_')
side_effects.append(load_fixture(filename))
if return_values is True:
return side_effects
destination.side_effect = side_effects
return None
def test_cnos_l2_interface_access_vlan(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 1/33',
mode='access',
access_vlan=13,
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface ethernet 1/33',
'switchport access vlan 13'
],
'changed': True,
'warnings': []
}
)
def test_cnos_l2_interface_vlan_does_not_exist(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 1/33',
mode='access',
access_vlan=10,
))
result = self.execute_module(failed=True)
self.assertEqual(
result,
{
'msg': 'You are trying to configure a VLAN on an interface '
'that\ndoes not exist on the switch yet!',
'failed': True,
'vlan': '10'
}
)
def test_cnos_l2_interface_incorrect_state(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 1/44',
mode='access',
access_vlan=10,
))
result = self.execute_module(failed=True)
self.assertEqual(
result,
{
'msg': 'Ensure interface is configured to be a L2\nport first '
'before using this module. You can use\nthe cnos_'
'interface module for this.',
'failed': True
}
)
def test_cnos_l2_interface_trunk(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 1/45',
mode='trunk',
native_vlan='12',
trunk_allowed_vlans='13,12'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface ethernet 1/45',
'switchport mode trunk',
'switchport trunk allowed vlan 13,12',
'switchport trunk native vlan 12'
],
'changed': True,
'warnings': []
}
)
| gpl-3.0 |
Lh4cKg/sl4a | python/src/Tools/scripts/ifdef.py | 66 | 3717 | #! /usr/bin/env python
# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#else
# lines themselfs (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
# preprocessor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines. The effect of #define/#undef commands in the input
# file or in included files is not taken into account. Tests using
# #if and the defined() pseudo function are not recognized. The #elif
# command is not recognized. Improper nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.
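# A hedged usage sketch (file name and symbols illustrative, not from the
# original script): given foo.c containing
#     #ifdef UNIX
#     unix_code();
#     #else
#     other_code();
#     #endif
# running "ifdef.py -DUNIX foo.c" would emit only "unix_code();", dropping
# the other branch and the #ifdef/#else/#endif lines themselves.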
import sys
import getopt
defs = []
undefs = []
def main():
opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
for o, a in opts:
if o == '-D':
defs.append(a)
if o == '-U':
undefs.append(a)
if not args:
args = ['-']
for filename in args:
if filename == '-':
process(sys.stdin, sys.stdout)
else:
f = open(filename, 'r')
process(f, sys.stdout)
f.close()
def process(fpi, fpo):
keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
ok = 1
stack = []
while 1:
line = fpi.readline()
if not line: break
while line[-2:] == '\\\n':
nextline = fpi.readline()
if not nextline: break
line = line + nextline
tmp = line.strip()
if tmp[:1] != '#':
if ok: fpo.write(line)
continue
tmp = tmp[1:].strip()
words = tmp.split()
keyword = words[0]
if keyword not in keywords:
if ok: fpo.write(line)
continue
if keyword in ('ifdef', 'ifndef') and len(words) == 2:
if keyword == 'ifdef':
ko = 1
else:
ko = 0
word = words[1]
if word in defs:
stack.append((ok, ko, word))
if not ko: ok = 0
elif word in undefs:
stack.append((ok, not ko, word))
if ko: ok = 0
else:
stack.append((ok, -1, word))
if ok: fpo.write(line)
elif keyword == 'if':
stack.append((ok, -1, ''))
if ok: fpo.write(line)
elif keyword == 'else' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
else:
s_ko = not s_ko
ok = s_ok
if not s_ko: ok = 0
stack[-1] = s_ok, s_ko, s_word
elif keyword == 'endif' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
del stack[-1]
ok = s_ok
else:
sys.stderr.write('Unknown keyword %s\n' % keyword)
if stack:
sys.stderr.write('stack: %s\n' % stack)
if __name__ == '__main__':
main()
| apache-2.0 |
jamiefolsom/edx-platform | common/test/acceptance/performance/test_studio_performance.py | 139 | 3307 | """
Single page performance tests for Studio.
"""
from bok_choy.web_app_test import WebAppTest, with_cache
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.overview import CourseOutlinePage
from nose.plugins.attrib import attr
@attr(har_mode='explicit')
class StudioPagePerformanceTest(WebAppTest):
"""
Base class to capture studio performance with HTTP Archives.
To import courses for the bok choy tests, pass the --imports_dir=<course directory> argument to the paver command
where <course directory> contains the (un-archived) courses to be imported.
"""
course_org = 'edX'
course_num = 'Open_DemoX'
course_run = 'edx_demo_course'
def setUp(self):
"""
Authenticate as staff so we can view and edit courses.
"""
super(StudioPagePerformanceTest, self).setUp()
AutoAuthPage(self.browser, staff=True).visit()
def record_visit_outline(self):
"""
Produce a HAR for loading the course outline page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run)
har_name = 'OutlinePage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_page.visit()
self.har_capturer.save_har(self.browser, har_name)
def record_visit_unit(self, section_title, subsection_title, unit_title):
"""
Produce a HAR for loading a unit page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run).visit()
course_outline_unit = course_outline_page.section(section_title).subsection(subsection_title).expand_subsection().unit(unit_title)
har_name = 'UnitPage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_unit.go_to()
self.har_capturer.save_har(self.browser, har_name)
class StudioJusticePerformanceTest(StudioPagePerformanceTest):
"""
Test performance on the HarvardX Justice course.
"""
course_org = 'HarvardX'
course_num = 'ER22x'
course_run = '2013_Spring'
@with_cache
def test_visit_outline(self):
"""Record visiting the Justice course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting a Justice unit page"""
self.record_visit_unit(
'Lecture 1 - Doing the Right Thing',
'Discussion Prompt: Ethics of Torture',
'Discussion Prompt: Ethics of Torture'
)
class StudioPub101PerformanceTest(StudioPagePerformanceTest):
"""
Test performance on Andy's PUB101 outline page.
"""
course_org = 'AndyA'
course_num = 'PUB101'
course_run = 'PUB101'
@with_cache
def test_visit_outline(self):
"""Record visiting the PUB101 course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting the PUB101 unit page"""
self.record_visit_unit('Released', 'Released', 'Released')
| agpl-3.0 |
zerberros/tercera_mano | tercera_mano.py | 1 | 9083 | #!/usr/bin/python
###############################################################################
# Copyleft (c) 2013 Jairo Estefanía. Some rights reserved. #
# This program or module is free software: you can redistribute it and/or #
# modify it under the terms of the Lesser GNU General Public Licence (LGPL) #
# as published by the Free Software Foundation. #
# It is provided for educational purposes and is distributed in the hope that #
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See #
# the Lesser GNU General Public Licence for more details.                     #
###############################################################################
#code to convert the Qt Designer .ui file into a Python script:
#pyside-uic -o tercera_manoUI.py ./gui/tercera-manoUI.ui
#import the required libraries:
import sys
import platform # Test
import PySide # Test
import sympy # Test
from PySide import QtCore, QtGui
from PySide.QtGui import QApplication, QMainWindow, QTextEdit,\
QPushButton, QMessageBox # test
from tercera_manoUI import Ui_MainWindow
from sympy import integrate, erf, exp, sin, sqrt, log, oo, pi, sinh, symbols
from sympy import *
__version__ = '0.0.3'
class ControlMainWindow(QtGui.QMainWindow):
    ''' No help available yet '''
def sumaserie(self, x, y):
return (x + y)
def sumaparalelo(self, x, y):
return ((x * y) / (x + y))
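    # Hedged worked example (values illustrative, not from the original
    # module): for two 4-ohm impedances, sumaserie(4, 4) == 8 while
    # sumaparalelo(4, 4) == (4 * 4) / (4 + 4) == 2.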
def asociacion_de_impedancias(self):
a = complex(self.ui.lineEdit.text())
b = complex(self.ui.lineEdit_2.text())
self.ui.label_3.setText("Valor Serie: " + str(self.sumaserie(a, b)))
self.ui.label_4.setText("Valor Paralelo: "
+ str(self.sumaparalelo(a, b)))
def calcula_de_impedancias(self):
freq = complex(self.ui.lE_cz_freq.text())
val = complex(0, float(self.ui.lE_cz_val.text()))
        # cB_cz == comboBox for the impedance calculation
if self.ui.cB_cz.currentText() == 'Condensador':
sol = 1 / (2 * pi * freq * val) # *complex(j))
# self.ui.lb_cz_sol.setText(str(sol))
if self.ui.cB_cz.currentText() == 'Bobina':
sol = 2 * pi * freq * val # *complex(j)
self.ui.lb_cz_sol.setText(str(sol.evalf()))
print(("algo es algo {} + {}".format(freq, val)))
def estrella_triangulo(self):
        # lE_dt == text input (lineEdit) for the star-delta conversion
z1 = complex(self.ui.lE_dt_1.text())
z2 = complex(self.ui.lE_dt_2.text())
z3 = complex(self.ui.lE_dt_3.text())
if self.ui.cB_dt.currentText() == 'Estrella -> Triángulo':
Z1 = (z2 + z3 + (z2 * z3) / z1)
Z2 = (z1 + z3 + (z1 * z3) / z2)
Z3 = (z1 + z2 + (z1 * z2) / z3)
elif self.ui.cB_dt.currentText() == 'Triángulo -> Estrella':
Z1 = (z2 * z3) / (z1 + z2 + z3)
Z2 = (z1 * z3) / (z1 + z2 + z3)
Z3 = (z1 * z2) / (z1 + z2 + z3)
self.ui.lb_dt_1.setText(str(Z1))
self.ui.lb_dt_2.setText(str(Z2))
self.ui.lb_dt_3.setText(str(Z3))
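    # Hedged sanity check (values illustrative): with z1 = z2 = z3 = 3 ohms,
    # 'Triángulo -> Estrella' gives Z1 = Z2 = Z3 = (3*3)/(3+3+3) = 1 ohm, and
    # 'Estrella -> Triángulo' on 1-ohm legs gives back 1 + 1 + (1*1)/1 = 3.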
def integrar(self):
f = self.ui.lineEdit_int_funcion.text()
f_var = [Symbol(self.ui.lineEdit_int_var_1.text()),
Symbol(self.ui.lineEdit_int_var_2.text()),
Symbol(self.ui.lineEdit_int_var_3.text())]
f_min = [(self.ui.lineEdit_int_min_1.text()),
(self.ui.lineEdit_int_min_2.text()),
(self.ui.lineEdit_int_min_3.text())]
f_max = [(self.ui.lineEdit_int_max_1.text()),
(self.ui.lineEdit_int_max_2.text()),
(self.ui.lineEdit_int_max_3.text())]
for i in range(0, int(self.ui.spinBox_int.text())):
if self.ui.radioButton_int_def.isChecked() is True:
f = integrate(f, (f_var[i], f_min[i], f_max[i]))
self.ui.label_int_solucion.setText(str(f))
print (f)
else:
f = integrate(f, f_var[i])
self.ui.label_int_solucion.setText(str(f))
print (f)
def derivar(self):
f = self.ui.lineEdit_int_funcion.text()
f_var = [Symbol(self.ui.lineEdit_int_var_1.text()),
Symbol(self.ui.lineEdit_int_var_2.text()),
Symbol(self.ui.lineEdit_int_var_3.text())]
for i in range(0, int(self.ui.spinBox_int.text())):
f = diff(f, f_var[i])
self.ui.label_int_solucion.setText(str(f))
def test_spin(self):
        # Disable the text inputs for the integration limits and
        # variables that will not be used, e.g. when only a single
        # integral is being computed.
if int(self.ui.spinBox_int.text()) == 1:
self.ui.label_int_text_2.setEnabled(False)
self.ui.label_int_min_2.setEnabled(False)
self.ui.label_int_max_2.setEnabled(False)
self.ui.lineEdit_int_var_2.setEnabled(False)
self.ui.lineEdit_int_min_2.setEnabled(False)
self.ui.lineEdit_int_max_2.setEnabled(False)
self.ui.label_int_text_3.setEnabled(False)
self.ui.label_int_min_3.setEnabled(False)
self.ui.label_int_max_3.setEnabled(False)
self.ui.lineEdit_int_var_3.setEnabled(False)
self.ui.lineEdit_int_min_3.setEnabled(False)
self.ui.lineEdit_int_max_3.setEnabled(False)
elif int(self.ui.spinBox_int.text()) == 2:
self.ui.label_int_text_2.setEnabled(True)
self.ui.label_int_min_2.setEnabled(True)
self.ui.label_int_max_2.setEnabled(True)
self.ui.lineEdit_int_var_2.setEnabled(True)
self.ui.lineEdit_int_min_2.setEnabled(True)
self.ui.lineEdit_int_max_2.setEnabled(True)
self.ui.label_int_text_3.setEnabled(False)
self.ui.label_int_min_3.setEnabled(False)
self.ui.label_int_max_3.setEnabled(False)
self.ui.lineEdit_int_var_3.setEnabled(False)
self.ui.lineEdit_int_min_3.setEnabled(False)
self.ui.lineEdit_int_max_3.setEnabled(False)
elif int(self.ui.spinBox_int.text()) == 3:
self.ui.label_int_text_2.setEnabled(True)
self.ui.label_int_min_2.setEnabled(True)
self.ui.label_int_max_2.setEnabled(True)
self.ui.lineEdit_int_var_2.setEnabled(True)
self.ui.lineEdit_int_min_2.setEnabled(True)
self.ui.lineEdit_int_max_2.setEnabled(True)
self.ui.label_int_text_3.setEnabled(True)
self.ui.label_int_min_3.setEnabled(True)
self.ui.label_int_max_3.setEnabled(True)
self.ui.lineEdit_int_var_3.setEnabled(True)
self.ui.lineEdit_int_min_3.setEnabled(True)
self.ui.lineEdit_int_max_3.setEnabled(True)
def about(self):
'''Popup a box with about message.'''
QMessageBox.about(self, "tercera-mano",
"""<b>Tercera-mano</b> v %s
<p>Copyleft - 2013 Jairo Estefania.
<p>Some rights reserved in accordance with
LGPL - NO WARRANTIES!
<p>Enjoy ;)
<p>Python %s - sympy version %s - \
PySide version %s - Qt version %s on %s """ % (__version__,
platform.python_version(), sympy.__version__,
PySide.__version__, PySide.QtCore.__version__,
platform.system()))
def __init__(self, parent=None):
super(ControlMainWindow, self).__init__(parent)
        # This boilerplate is always the same
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
        # connections between the GUI events and the handler functions
QtCore.QObject.connect(self.ui.pushButton_asociacion_impedancias,
QtCore.SIGNAL("clicked()"), self.asociacion_de_impedancias)
QtCore.QObject.connect(self.ui.pushButton_calcula_impedancias,
QtCore.SIGNAL("clicked()"), self.calcula_de_impedancias)
## QtCore.QObject.connect(self.ui.pB_dt, QtCore.SIGNAL("clicked"),
## self.anda_estrella_triangulo)
QtCore.QObject.connect(self.ui.pushButton_integra,
QtCore.SIGNAL("clicked()"), self.integrar)
QtCore.QObject.connect(self.ui.pushButton_deriva,
QtCore.SIGNAL("clicked()"), self.derivar)
QtCore.QObject.connect(self.ui.pB_dt_cal,
QtCore.SIGNAL("clicked()"), self.estrella_triangulo)
QtCore.QObject.connect(self.ui.actionAbout,
QtCore.SIGNAL("triggered()"), self.about)
QtCore.QObject.connect(self.ui.spinBox_int,
QtCore.SIGNAL("valueChanged(int)"), self.test_spin)
QtCore.QObject.connect(self.ui.spinBox_int,
QtCore.SIGNAL("valueChanged(int)"), self.test_spin)
#test
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
mySW = ControlMainWindow()
mySW.show()
sys.exit(app.exec_())
| lgpl-3.0 |
StephenChusang/py-faster-rcnn-tracker | lib/rpn/proposal_layer.py | 6 | 6803 | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from fast_rcnn.config import cfg
from generate_anchors import generate_anchors
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes
from fast_rcnn.nms_wrapper import nms
DEBUG = False
class ProposalLayer(caffe.Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def setup(self, bottom, top):
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str_)
self._feat_stride = layer_params['feat_stride']
anchor_scales = layer_params.get('scales', (8, 16, 32))
self._anchors = generate_anchors(scales=np.array(anchor_scales))
self._num_anchors = self._anchors.shape[0]
if DEBUG:
print 'feat_stride: {}'.format(self._feat_stride)
print 'anchors:'
print self._anchors
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[0].reshape(1, 5)
# scores blob: holds scores for R regions of interest
if len(top) > 1:
top[1].reshape(1, 1, 1, 1)
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
assert bottom[0].data.shape[0] == 1, \
'Only single item batches are supported'
cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
scores = bottom[0].data[:, self._num_anchors:, :, :]
bbox_deltas = bottom[1].data
im_info = bottom[2].data[0, :]
if DEBUG:
print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
print 'scale: {}'.format(im_info[2])
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
if DEBUG:
print 'score map size: {}'.format(scores.shape)
# Enumerate all shifts
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
#
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
proposals = bbox_transform_inv(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = _filter_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
keep = nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
top[0].reshape(*(blob.shape))
top[0].data[...] = blob
# [Optional] output scores blob
if len(top) > 1:
top[1].reshape(*(scores.shape))
top[1].data[...] = scores
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
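# Hedged sanity-check sketch (boxes illustrative, not from this module):
# _filter_boxes(np.array([[0, 0, 15, 15], [0, 0, 3, 3]]), 10) keeps only
# index 0, since the first box is 16x16 and the second only 4x4.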
| mit |
sho-h/ruby_env | devkit/mingw/bin/lib/distutils/errors.py | 59 | 3582 | """distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
__revision__ = "$Id$"
class DistutilsError(Exception):
"""The root of all Distutils evil."""
class DistutilsModuleError(DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
class DistutilsClassError(DistutilsError):
"""Some command class (or possibly distribution class, if anyone
feels a need to subclass Distribution) is found not to be holding
up its end of the bargain, ie. implementing some part of the
"command "interface."""
class DistutilsGetoptError(DistutilsError):
"""The option table provided to 'fancy_getopt()' is bogus."""
class DistutilsArgError(DistutilsError):
"""Raised by fancy_getopt in response to getopt.error -- ie. an
error in the command line usage."""
class DistutilsFileError(DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before IOError or
OSError could be raised."""
class DistutilsOptionError(DistutilsError):
"""Syntactic/semantic errors in command options, such as use of
mutually conflicting options, or inconsistent options,
badly-spelled values, etc. No distinction is made between option
values originating in the setup script, the command line, config
files, or what-have-you -- but if we *know* something originated in
the setup script, we'll raise DistutilsSetupError instead."""
class DistutilsSetupError(DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
class DistutilsPlatformError(DistutilsError):
"""We don't know how to do something on the current platform (but
we do know how to do it on some platform) -- eg. trying to compile
C files on a platform not supported by a CCompiler subclass."""
class DistutilsExecError(DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
class DistutilsInternalError(DistutilsError):
"""Internal inconsistencies or impossibilities (obviously, this
should never be seen if the code is working!)."""
class DistutilsTemplateError(DistutilsError):
"""Syntax error in a file list template."""
class DistutilsByteCompileError(DistutilsError):
"""Byte compile error."""
# Exception classes used by the CCompiler implementation classes
class CCompilerError(Exception):
"""Some compile/link operation failed."""
class PreprocessError(CCompilerError):
"""Failure to preprocess one or more C/C++ files."""
class CompileError(CCompilerError):
"""Failure to compile one or more C/C++ source files."""
class LibError(CCompilerError):
"""Failure to create a static library from one or more C/C++ object
files."""
class LinkError(CCompilerError):
"""Failure to link one or more C/C++ object files into an executable
or shared library file."""
class UnknownFileError(CCompilerError):
"""Attempt to process an unknown file type."""
| mit |
maoxuxiang/termite_mallet_project | web2py/gluon/contrib/login_methods/dropbox_account.py | 42 | 4563 | #!/usr/bin/env python
# coding: utf8
"""
Dropbox Authentication for web2py
Developed by Massimo Di Pierro (2012)
Same License as Web2py License
"""
# note: here "session" is the Dropbox session, not current.session
import os
import re
import urllib
from dropbox import client, rest, session
from gluon import *
from gluon.tools import fetch
from gluon.storage import Storage
import gluon.contrib.simplejson as json
class DropboxAccount(object):
"""
from gluon.contrib.login_methods.dropbox_account import DropboxAccount
auth.settings.actions_disabled=['register','change_password',
'request_reset_password']
auth.settings.login_form = DropboxAccount(request,
key="...",
secret="...",
access_type="...",
login_url = "http://localhost:8000/%s/default/user/login" % request.application)
when logged in
client = auth.settings.login_form.client
"""
def __init__(self,
request,
key="",
secret="",
access_type="app_folder",
login_url="",
on_login_failure=None,
):
self.request = request
self.key = key
self.secret = secret
self.access_type = access_type
self.login_url = login_url
self.on_login_failure = on_login_failure
self.sess = session.DropboxSession(
self.key, self.secret, self.access_type)
def get_token(self):
if not current.session.dropbox_access_token:
request_token = current.session.dropbox_request_token
self.sess.set_request_token(request_token[0], request_token[1])
access_token = self.sess.obtain_access_token(self.sess.token)
current.session.dropbox_access_token = \
(access_token.key, access_token.secret)
else:
access_token = current.session.dropbox_access_token
self.sess.set_token(access_token[0], access_token[1])
def get_user(self):
if not current.session.dropbox_request_token:
return None
self.get_token()
user = Storage()
self.client = client.DropboxClient(self.sess)
data = self.client.account_info()
display_name = data.get('display_name', '').split(' ', 1)
user = dict(email=data.get('email', None),
first_name=display_name[0],
last_name=display_name[-1],
registration_id=data.get('uid', None))
if not user['registration_id'] and self.on_login_failure:
redirect(self.on_login_failure)
return user
def login_form(self):
request_token = self.sess.obtain_request_token()
current.session.dropbox_request_token = \
(request_token.key, request_token.secret)
dropbox_url = self.sess.build_authorize_url(request_token,
self.login_url)
redirect(dropbox_url)
form = IFRAME(_src=dropbox_url,
_scrolling="no",
_frameborder="no",
_style="width:400px;height:240px;")
return form
def logout_url(self, next="/"):
self.sess.unlink()
current.session.auth = None
return next
def get_client(self):
self.get_token()
self.client = client.DropboxClient(self.sess)
def put(self, filename, file):
if not hasattr(self,'client'): self.get_client()
return self.client.put_file(filename, file)['bytes']
def get(self, filename):
if not hasattr(self,'client'): self.get_client()
return self.client.get_file(filename)
def dir(self, path):
if not hasattr(self,'client'): self.get_client()
return self.client.metadata(path)
def use_dropbox(auth, filename='private/dropbox.key', **kwargs):
path = os.path.join(current.request.folder, filename)
if os.path.exists(path):
request = current.request
key, secret, access_type = open(path, 'r').read().strip().split(':')
host = current.request.env.http_host
login_url = "http://%s/%s/default/user/login" % \
(host, request.application)
auth.settings.actions_disabled = \
['register', 'change_password', 'request_reset_password']
auth.settings.login_form = DropboxAccount(
request, key=key, secret=secret, access_type=access_type,
login_url=login_url, **kwargs)
| bsd-3-clause |
s2oBCN/selenium | py/test/selenium/webdriver/common/utils.py | 68 | 2155 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import sys
import unittest
def run_tests(test_case, driver, webserver):
logging.basicConfig(level=logging.WARN)
webserver.start()
try:
testLoader = unittest.TestLoader()
testRunner = unittest.TextTestRunner()
test_case_name = "selenium.test.selenium.webdriver.common.%s" % test_case
if len(sys.argv) > 1:
testMethod = sys.argv[1]
testRunner.run(
testLoader.loadTestsFromName(
"%s.%s" % (test_case_name, testMethod)))
else:
testRunner.run(testLoader.loadTestsFromName(test_case_name))
driver.quit()
finally:
webserver.stop()
def require_online(func):
"""Only exucte the test method if the internet is accessible."""
def testMethod(self):
socket_ = socket.socket()
try:
socket_.settimeout(1)
socket_.connect(("www.google.com", 80))
return func(self)
except socket.error:
return lambda x: None
testMethod.func_name = func.func_name
return testMethod
def convert_cookie_to_json(cookie):
cookie_dict = {}
for key, value in cookie.items():
if key == "expires":
cookie_dict["expiry"] = int(value) * 1000
else:
cookie_dict[key] = value
return cookie_dict
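# Hedged usage sketch (values illustrative): convert_cookie_to_json(
# {"name": "sid", "expires": "60"}) returns {"name": "sid", "expiry": 60000},
# i.e. "expires" in seconds becomes "expiry" in milliseconds.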
| apache-2.0 |
gunrock/gunrock | tools/gr_to_mtx_symmetric.py | 4 | 1322 | #!/usr/local/bin/python
"""
Simple python script to convert .gr format graph to .mtx format
"""
import os
import sys
import string
### check command line args
if (len(sys.argv)) != 2:
print ' Usage: python gr_to_mtx_symmetric.py graph.gr'
sys.exit()
### gr graph input
file_gr = sys.argv[1]
### matrix-market format output file
file_mm = sys.argv[1].split('.')[0] + ".symmetric.mtx"
line_num = 0;
with open(file_gr, 'r') as gr, open(file_mm, 'w') as mm:
mm.write('%%MatrixMarket matrix coordinate Integer symmetric\n')
for line in gr:
### skip blank lines and comments
if line.strip() == '' or 'c' in line:
pass
else:
item = line.split(' ')
if item[0] == 'p':
### write first line -> nodes nodes edges
n = item[2]
e = item[3].split()
e = e[0]
write = str(n) + ' ' + str(n)+ ' ' + str(e) + '\n'
mm.write(write)
if item[0] == 'a':
                ### write rest of mtx contents -> dst src weight
v = item[1]
u = item[2]
w = item[3].split()
w = w[0]
write = str(u) + ' ' + str(v) + ' ' + str(w) + '\n'
mm.write(write)
| apache-2.0 |
xiejianying/pjsip_trunk | tests/pjsua/mod_sipp.py | 19 | 8562 | # $Id$
## Automatic test module for SIPp.
##
## This module will need a test driver for each SIPp scenario:
## - For simple scenario, i.e: make/receive call (including auth), this
## test module can auto-generate a default test driver, i.e: make call
## or apply auto answer. Just name the SIPp scenario using "uas" or
## "uac" prefix accordingly.
## - Custom test driver can be defined in a python script file containing
## a list of the PJSUA instances and another list for PJSUA expects/
## commands. The custom test driver file must use the same filename as
## the SIPp XML scenario. See samples of SIPp scenario + its driver
## in tests/pjsua/scripts-sipp/ folder for detail.
##
## Here are defined macros that can be used in the custom driver:
## - $SIPP_PORT : SIPp binding port
## - $SIPP_URI : SIPp SIP URI
## - $PJSUA_PORT[N] : binding port of PJSUA instance #N
## - $PJSUA_URI[N] : SIP URI of PJSUA instance #N
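## A hedged sketch of a custom driver file (names and patterns illustrative,
## not from this module): a "uas_hold.py" next to "uas_hold.xml" could hold:
##   PJSUA = ["--null-audio --max-calls=1 --no-tcp sip:127.0.0.1:$SIPP_PORT"]
##   PJSUA_EXPECTS = [[0, "CONFIRMED", "h"]]
## i.e. each expect entry is [pjsua_index, stdout_pattern, command_to_send].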
import ctypes
import time
import imp
import sys
import os
import re
import subprocess
from inc_cfg import *
import inc_const
# flag that indicates the test is running on Unix
G_INUNIX = False
if sys.platform.lower().find("win32")!=-1 or sys.platform.lower().find("microsoft")!=-1:
G_INUNIX = False
else:
G_INUNIX = True
# /dev/null handle, for redirecting output when SIPP is not in background mode
FDEVNULL = None
# SIPp executable path and param
#SIPP_PATH = '"C:\\devs\\bin\\Sipp_3.2\\sipp.exe"'
SIPP_PATH = 'sipp'
SIPP_PORT = 6000
SIPP_PARAM = "-m 1 -i 127.0.0.1 -p " + str(SIPP_PORT)
SIPP_TIMEOUT = 60
# In BG mode, SIPp doesn't require a special terminal
# In non-BG mode, on win, it needs env var: "TERMINFO=c:\cygwin\usr\share\terminfo"
# TODO: on unix with BG mode, waitpid() always fails, needs to be fixed
SIPP_BG_MODE = False
#SIPP_BG_MODE = not G_INUNIX
# Will be updated based on the test driver file (a .py file whose the same name as SIPp XML file)
PJSUA_INST_PARAM = []
PJSUA_EXPECTS = []
# Default PJSUA param if test driver is not available:
# - no-tcp as SIPp is on UDP only
# - id, username, and realm: to allow PJSUA sending re-INVITE with auth after receiving 401/407 response
PJSUA_DEF_PARAM = "--null-audio --max-calls=1 --no-tcp --id=sip:a@localhost --username=a --realm=*"
# Get SIPp scenario (XML file)
SIPP_SCEN_XML = ""
if ARGS[1].endswith('.xml'):
SIPP_SCEN_XML = ARGS[1]
else:
exit(-99)
# Functions for resolving macros in the test driver
def resolve_pjsua_port(mo):
return str(PJSUA_INST_PARAM[int(mo.group(1))].sip_port)
def resolve_pjsua_uri(mo):
return PJSUA_INST_PARAM[int(mo.group(1))].uri[1:-1]
def resolve_driver_macros(st):
st = re.sub("\$SIPP_PORT", str(SIPP_PORT), st)
st = re.sub("\$SIPP_URI", "sip:sipp@127.0.0.1:"+str(SIPP_PORT), st)
st = re.sub("\$PJSUA_PORT\[(\d+)\]", resolve_pjsua_port, st)
st = re.sub("\$PJSUA_URI\[(\d+)\]", resolve_pjsua_uri, st)
return st
# Init test driver
if os.access(SIPP_SCEN_XML[:-4]+".py", os.R_OK):
# Load test driver file (the corresponding .py file), if any
cfg_file = imp.load_source("cfg_file", SIPP_SCEN_XML[:-4]+".py")
for ua_idx, ua_param in enumerate(cfg_file.PJSUA):
ua_param = resolve_driver_macros(ua_param)
PJSUA_INST_PARAM.append(InstanceParam("pjsua"+str(ua_idx), ua_param))
PJSUA_EXPECTS = cfg_file.PJSUA_EXPECTS
else:
# Generate default test driver
if os.path.basename(SIPP_SCEN_XML)[0:3] == "uas":
# auto make call when SIPp is as UAS
ua_param = PJSUA_DEF_PARAM + " sip:127.0.0.1:" + str(SIPP_PORT)
else:
# auto answer when SIPp is as UAC
ua_param = PJSUA_DEF_PARAM + " --auto-answer=200"
PJSUA_INST_PARAM.append(InstanceParam("pjsua", ua_param))
# Start SIPp process, returning PID
def start_sipp():
global SIPP_BG_MODE
sipp_proc = None
sipp_param = SIPP_PARAM + " -sf " + SIPP_SCEN_XML
if SIPP_BG_MODE:
sipp_param = sipp_param + " -bg"
if SIPP_TIMEOUT:
sipp_param = sipp_param + " -timeout "+str(SIPP_TIMEOUT)+"s -timeout_error" + " -deadcall_wait "+str(SIPP_TIMEOUT)+"s"
# add target param
sipp_param = sipp_param + " 127.0.0.1:" + str(PJSUA_INST_PARAM[0].sip_port)
# run SIPp
fullcmd = os.path.normpath(SIPP_PATH) + " " + sipp_param
print "Running SIPP: " + fullcmd
if SIPP_BG_MODE:
sipp_proc = subprocess.Popen(fullcmd, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=G_INUNIX, universal_newlines=False)
else:
# redirect output to NULL
global FDEVNULL
#FDEVNULL = open(os.devnull, 'w')
FDEVNULL = open("logs/sipp_output.tmp", 'w')
sipp_proc = subprocess.Popen(fullcmd, shell=G_INUNIX, stdout=FDEVNULL, stderr=FDEVNULL)
if not SIPP_BG_MODE:
if sipp_proc == None or sipp_proc.poll():
return None
return sipp_proc
else:
# get SIPp child process PID
pid = 0
r = re.compile("PID=\[(\d+)\]", re.I)
while True:
line = sipp_proc.stdout.readline()
pid_r = r.search(line)
if pid_r:
pid = int(pid_r.group(1))
break
if not sipp_proc.poll():
break
if pid != 0:
# Win specific: get process handle from PID, as on win32, os.waitpid() takes process handle instead of pid
if (sys.platform == "win32"):
SYNCHRONIZE = 0x00100000
PROCESS_QUERY_INFORMATION = 0x0400
hnd = ctypes.windll.kernel32.OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, False, pid)
pid = hnd
return pid
# Wait SIPp process to exit, returning SIPp exit code
def wait_sipp(sipp):
if not SIPP_BG_MODE:
global FDEVNULL
sipp.wait()
FDEVNULL.close()
return sipp.returncode
else:
print "Waiting SIPp (PID=" + str(sipp) + ") to exit.."
wait_cnt = 0
while True:
try:
wait_cnt = wait_cnt + 1
[pid_, ret_code] = os.waitpid(sipp, 0)
if sipp == pid_:
#print "SIPP returned ", ret_code
ret_code = ret_code >> 8
# Win specific: Close process handle
if (sys.platform == "win32"):
ctypes.windll.kernel32.CloseHandle(sipp)
return ret_code
except os.error:
if wait_cnt <= 5:
print "Retry ("+str(wait_cnt)+") waiting SIPp.."
else:
return -99
# Execute PJSUA flow
def exec_pjsua_expects(t, sipp):
# Get all PJSUA instances
ua = []
for ua_idx in range(len(PJSUA_INST_PARAM)):
ua.append(t.process[ua_idx])
ua_err_st = ""
while len(PJSUA_EXPECTS):
expect = PJSUA_EXPECTS.pop(0)
ua_idx = expect[0]
expect_st = expect[1]
send_cmd = resolve_driver_macros(expect[2])
# Handle exception in pjsua flow, to avoid zombie SIPp process
try:
if expect_st != "":
ua[ua_idx].expect(expect_st, raise_on_error = True)
if send_cmd != "":
ua[ua_idx].send(send_cmd)
except TestError, e:
ua_err_st = e.desc
break;
except:
ua_err_st = "Unknown error"
break;
# Need to poll here for handling these cases:
# - If there is no PJSUA EXPECT scenario, we must keep polling the stdout,
	# otherwise the PJSUA process may get stuck (due to stdout pipe buffer full?).
# - last PJSUA_EXPECT contains a pjsua command that needs time to
# finish, for example "v" (re-INVITE), the SIPp XML scenario may expect
# that re-INVITE transaction to be completed and without stdout poll
# PJSUA process may stuck.
# Ideally the poll should be done contiunously until SIPp process is
# terminated.
# Update: now pjsua stdout is polled continuously by a dedicated thread,
# so the poll is no longer needed
#for ua_idx in range(len(ua)):
# ua[ua_idx].expect(inc_const.STDOUT_REFRESH, raise_on_error = False)
return ua_err_st
def sipp_err_to_str(err_code):
if err_code == 0:
return "All calls were successful"
elif err_code == 1:
return "At least one call failed"
elif err_code == 97:
return "exit on internal command. Calls may have been processed"
elif err_code == 99:
return "Normal exit without calls processed"
elif err_code == -1:
return "Fatal error (timeout)"
elif err_code == -2:
return "Fatal error binding a socket"
else:
return "Unknown error"
# Test body function
def TEST_FUNC(t):
sipp_ret_code = 0
ua_err_st = ""
sipp = start_sipp()
if not sipp:
raise TestError("Failed starting SIPp")
ua_err_st = exec_pjsua_expects(t, sipp)
sipp_ret_code = wait_sipp(sipp)
if ua_err_st != "":
raise TestError(ua_err_st)
if sipp_ret_code:
rc = ctypes.c_byte(sipp_ret_code).value
raise TestError("SIPp returned error " + str(rc) + ": " + sipp_err_to_str(rc))
# Here where it all comes together
test = TestParam(SIPP_SCEN_XML[:-4],
PJSUA_INST_PARAM,
TEST_FUNC)
| gpl-2.0 |
bavardage/statsmodels | statsmodels/tsa/varma_process.py | 3 | 19937 | # -*- coding: utf-8 -*-
""" Helper and filter functions for VAR and VARMA, and basic VAR class
Created on Mon Jan 11 11:04:23 2010
Author: josef-pktd
License: BSD
This is a new version, I didn't look at the old version again, but similar
ideas.
not copied/cleaned yet:
* fftn based filtering, creating samples with fft
* Tests: I ran examples but did not convert them to tests
examples look good for parameter estimate and forecast, and filter functions
main TODOs:
* result statistics
* see whether Bayesian dummy observation can be included without changing
the single call to linalg.lstsq
* impulse response function does not treat correlation, see Hamilton and jplv
Extensions
* constraints, Bayesian priors/penalization
* Error Correction Form and Cointegration
* Factor Models Stock-Watson, ???
see also VAR section in Notes.txt
"""
import numpy as np
from numpy.testing import assert_equal
from scipy import signal
#might not (yet) need the following
from scipy.signal.signaltools import _centered as trim_centered
from statsmodels.tsa.tsatools import lagmat
def varfilter(x, a):
'''apply an autoregressive filter to a series x
Warning: I just found out that convolve doesn't work as I
thought, this likely doesn't work correctly for
nvars>3
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
    Note: maybe convert to axis=1 (not done yet)
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
#(no, reserar is inverse filter)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
            # could also use np.convolve, but easier for switching to fft
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# #not necessary:
# if np.any(a.shape[1:] != nvar):
# raise ValueError('if 3d shape of a has to be (nobs,nvar,nvar)')
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
return yvalid
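# Hedged doctest-style sketch of case 1, one AR filter for all columns
# (numbers illustrative, not from this module):
# >>> x = np.arange(10.).reshape(5, 2)
# >>> varfilter(x, np.array([1., -0.5]))   # row t is x[t] - 0.5 * x[t-1]
# returns a (4, 2) array whose first row is [2. - 0.5*0., 3. - 0.5*1.].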
def varinversefilter(ar, nobs, version=1):
'''creates inverse ar filter (MA representation) recursively
The VAR lag polynomial is defined by ::
ar(L) y_t = u_t or
y_t = -ar_{-1}(L) y_{t-1} + u_t
the returned lagpolynomial is arinv(L)=ar^{-1}(L) in ::
y_t = arinv(L) u_t
Parameters
----------
ar : array, (nlags,nvars,nvars)
matrix lagpolynomial, currently no exog
first row should be identity
Returns
-------
arinv : array, (nobs,nvars,nvars)
Notes
-----
'''
nlags, nvars, nvarsex = ar.shape
if nvars != nvarsex:
        print 'exogenous variables not implemented, not tested'
arinv = np.zeros((nobs+1, nvarsex, nvars))
arinv[0,:,:] = ar[0]
arinv[1:nlags,:,:] = -ar[1:]
if version == 1:
for i in range(2,nobs+1):
tmp = np.zeros((nvars,nvars))
for p in range(1,nlags):
tmp += np.dot(-ar[p],arinv[i-p,:,:])
arinv[i,:,:] = tmp
if version == 0:
for i in range(nlags+1,nobs+1):
print ar[1:].shape, arinv[i-1:i-nlags:-1,:,:].shape
#arinv[i,:,:] = np.dot(-ar[1:],arinv[i-1:i-nlags:-1,:,:])
#print np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1])).shape
#arinv[i,:,:] = np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1]))
raise NotImplementedError('waiting for generalized ufuncs or something')
return arinv
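# Hedged sketch (reuses the a21 example polynomial defined in __main__
# below): for ar = a21, i.e. y_t = diag(0.8, 0.6) y_{t-1} + u_t, the
# recursion yields arinv[i] = diag(0.8**i, 0.6**i), the MA coefficients.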
def vargenerate(ar, u, initvalues=None):
'''generate an VAR process with errors u
similar to gauss
uses loop
Parameters
----------
ar : array (nlags,nvars,nvars)
matrix lagpolynomial
u : array (nobs,nvars)
exogenous variable, error term for VAR
Returns
-------
sar : array (1+nobs,nvars)
sample of var process, inverse filtered u
does not trim initial condition y_0 = 0
Examples
--------
# generate random sample of VAR
nobs, nvars = 10, 2
u = numpy.random.randn(nobs,nvars)
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
vargenerate(a21,u)
# Impulse Response to an initial shock to the first variable
imp = np.zeros((nobs, nvars))
imp[0,0] = 1
vargenerate(a21,imp)
'''
nlags, nvars, nvarsex = ar.shape
nlagsm1 = nlags - 1
nobs = u.shape[0]
if nvars != nvarsex:
        print 'exogenous variables not implemented, not tested'
if u.shape[1] != nvars:
raise ValueError('u needs to have nvars columns')
if initvalues is None:
sar = np.zeros((nobs+nlagsm1, nvars))
start = nlagsm1
else:
start = max(nlagsm1, initvalues.shape[0])
sar = np.zeros((nobs+start, nvars))
sar[start-initvalues.shape[0]:start] = initvalues
#sar[nlagsm1:] = u
sar[start:] = u
#if version == 1:
for i in range(start,start+nobs):
for p in range(1,nlags):
sar[i] += np.dot(sar[i-p,:],-ar[p])
return sar
def padone(x, front=0, back=0, axis=0, fillvalue=0):
'''pad with zeros along one axis, currently only axis=0
can be used sequentially to pad several axis
Examples
--------
>>> padone(np.ones((2,3)),1,3,axis=1)
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> padone(np.ones((2,3)),1,1, fillvalue=np.nan)
array([[ NaN, NaN, NaN],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ NaN, NaN, NaN]])
'''
#primitive version
shape = np.array(x.shape)
shape[axis] += (front + back)
shapearr = np.array(x.shape)
out = np.empty(shape)
out.fill(fillvalue)
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shapearr
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print myslice
#print out.shape
#print out[tuple(myslice)].shape
out[tuple(myslice)] = x
return out
def trimone(x, front=0, back=0, axis=0):
'''trim number of array elements along one axis
Examples
--------
>>> xp = padone(np.ones((2,3)),1,3,axis=1)
>>> xp
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> trimone(xp,1,3,1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
'''
shape = np.array(x.shape)
shape[axis] -= (front + back)
#print shape, front, back
shapearr = np.array(x.shape)
startind = np.zeros(x.ndim)
startind[axis] = front
endind = startind + shape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print myslice
#print shape, endind
#print x[tuple(myslice)].shape
return x[tuple(myslice)]
def ar2full(ar):
'''make reduced lagpolynomial into a right side lagpoly array
'''
nlags, nvar,nvarex = ar.shape
return np.r_[np.eye(nvar,nvarex)[None,:,:],-ar]
def ar2lhs(ar):
'''convert full (rhs) lagpolynomial into a reduced, left side lagpoly array
this is mainly a reminder about the definition
'''
return -ar[1:]
class _Var(object):
'''obsolete VAR class, use tsa.VAR instead, for internal use only
Example
-------
>>> v = Var(ar2s)
>>> v.fit(1)
>>> v.arhat
array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.77784898, 0.01726193],
[ 0.10733009, -0.78665335]]])
'''
def __init__(self, y):
self.y = y
self.nobs, self.nvars = y.shape
def fit(self, nlags):
'''estimate parameters using ols
Parameters
----------
nlags : integer
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance
'''
self.nlags = nlags # without current period
nvars = self.nvars
#TODO: ar2s looks like a module variable, bug?
#lmat = lagmat(ar2s, nlags, trim='both', original='in')
lmat = lagmat(self.y, nlags, trim='both', original='in')
self.yred = lmat[:,:nvars]
self.xred = lmat[:,nvars:]
res = np.linalg.lstsq(self.xred, self.yred)
self.estresults = res
self.arlhs = res[0].reshape(nlags, nvars, nvars)
self.arhat = ar2full(self.arlhs)
self.rss = res[1]
self.xredrank = res[2]
def predict(self):
'''calculate estimated timeseries (yhat) for sample
'''
if not hasattr(self, 'yhat'):
self.yhat = varfilter(self.y, self.arhat)
return self.yhat
def covmat(self):
''' covariance matrix of estimate
# not sure it's correct, need to check orientation everywhere
# looks ok, display needs getting used to
>>> v.rss[None,None,:]*np.linalg.inv(np.dot(v.xred.T,v.xred))[:,:,None]
array([[[ 0.37247445, 0.32210609],
[ 0.1002642 , 0.08670584]],
[[ 0.1002642 , 0.08670584],
[ 0.45903637, 0.39696255]]])
>>>
>>> v.rss[0]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.37247445, 0.1002642 ],
[ 0.1002642 , 0.45903637]])
>>> v.rss[1]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.32210609, 0.08670584],
[ 0.08670584, 0.39696255]])
'''
#check if orientation is same as self.arhat
self.paramcov = (self.rss[None,None,:] *
np.linalg.inv(np.dot(self.xred.T, self.xred))[:,:,None])
def forecast(self, horiz=1, u=None):
        '''calculates forecast for horiz number of periods at end of sample
Parameters
----------
horiz : int (optional, default=1)
forecast horizon
u : array (horiz, nvars)
error term for forecast periods. If None, then u is zero.
Returns
-------
yforecast : array (nobs+horiz, nvars)
this includes the sample and the forecasts
'''
if u is None:
u = np.zeros((horiz, self.nvars))
return vargenerate(self.arhat, u, initvalues=self.y)
class VarmaPoly(object):
'''class to keep track of Varma polynomial format
Examples
--------
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
'''
def __init__(self, ar, ma=None):
self.ar = ar
self.ma = ma
nlags, nvarall, nvars = ar.shape
self.nlags, self.nvarall, self.nvars = nlags, nvarall, nvars
self.isstructured = not (ar[0,:nvars] == np.eye(nvars)).all()
if self.ma is None:
self.ma = np.eye(nvars)[None,...]
self.isindependent = True
else:
self.isindependent = not (ma[0] == np.eye(nvars)).all()
self.malags = ar.shape[0]
self.hasexog = nvarall > nvars
self.arm1 = -ar[1:]
#@property
def vstack(self, a=None, name='ar'):
'''stack lagpolynomial vertically in 2d array
'''
if not a is None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.reshape(-1, self.nvarall)
#@property
def hstack(self, a=None, name='ar'):
'''stack lagpolynomial horizontally in 2d array
'''
if not a is None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.swapaxes(1,2).reshape(-1, self.nvarall).T
#@property
def stacksquare(self, a=None, name='ar', orientation='vertical'):
'''stack lagpolynomial vertically in 2d square array with eye
'''
if not a is None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
astacked = a.reshape(-1, self.nvarall)
lenpk, nvars = astacked.shape #[0]
amat = np.eye(lenpk, k=nvars)
amat[:,:nvars] = astacked
return amat
#@property
def vstackarma_minus1(self):
        '''stack the ar and ma lag polynomials (lags 1 and up) vertically in 2d array
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.reshape(-1, self.nvarall)
#@property
def hstackarma_minus1(self):
        '''stack the ar and ma lag polynomials (lags 1 and up) horizontally in 2d array
this is the Kalman Filter representation, I think
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.swapaxes(1,2).reshape(-1, self.nvarall)
def getisstationary(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isstationary : boolean
*attaches*
areigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if not a is None:
a = a
else:
if self.isstructured:
a = -self.reduceform(self.ar)[1:]
else:
a = -self.ar[1:]
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.areigenvalues = ev
return (np.abs(ev) < 1).all()
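    # Hedged sketch (values illustrative): for a VAR(1) with reduced lag
    # matrix A = [[0.8, 0.], [0., 0.6]], the companion matrix built by
    # stacksquare is A itself; eigenvalues 0.8 and 0.6 lie inside the
    # unit circle, so getisstationary returns True.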
def getisinvertible(self, a=None):
        '''check whether the moving-average lag-polynomial is invertible
Returns
-------
isinvertible : boolean
*attaches*
maeigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if not a is None:
a = a
else:
if self.isindependent:
a = self.reduceform(self.ma)[1:]
else:
a = self.ma[1:]
if a.shape[0] == 0:
# no ma lags
self.maeigenvalues = np.array([], np.complex)
return True
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.maeigenvalues = ev
return (np.abs(ev) < 1).all()
def reduceform(self, apoly):
'''
this assumes no exog, todo
'''
if apoly.ndim != 3:
raise ValueError('apoly needs to be 3d')
nlags, nvarsex, nvars = apoly.shape
a = np.empty_like(apoly)
try:
a0inv = np.linalg.inv(a[0,:nvars, :])
except np.linalg.LinAlgError:
raise ValueError('matrix not invertible',
'ask for implementation of pinv')
for lag in range(nlags):
a[lag] = np.dot(a0inv, apoly[lag])
return a
if __name__ == "__main__":
# some example lag polynomials
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
a22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0.1, -0.8]]])
a23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0.2],
[ 0.1, -0.6]]])
a24 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
a31 = np.r_[np.eye(3)[None,:,:], 0.8*np.eye(3)[None,:,:]]
a32 = np.array([[[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[ 0.8, 0. , 0. ],
[ 0.1, 0.6, 0. ],
[ 0. , 0. , 0.9]]])
########
ut = np.random.randn(1000,2)
ar2s = vargenerate(a22,ut)
#res = np.linalg.lstsq(lagmat(ar2s,1)[:,1:], ar2s)
res = np.linalg.lstsq(lagmat(ar2s,1), ar2s)
bhat = res[0].reshape(1,2,2)
arhat = ar2full(bhat)
    #print(maxabs(arhat - a22))
v = _Var(ar2s)
v.fit(1)
v.forecast()
v.forecast(25)[-30:]
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
ar23ns = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-1.9, 0. ],
[ 0.4, -0.6]],
[[ 0.3, 0. ],
[ 0.1, -0.1]]])
vp = VarmaPoly(ar23, ma22)
    print(vars(vp))
    print(vp.vstack())
    print(vp.vstack(a24))
    print(vp.hstackarma_minus1())
    print(vp.getisstationary())
    print(vp.getisinvertible())
    vp2 = VarmaPoly(ar23ns)
    print(vp2.getisstationary())
    print(vp2.getisinvertible())  # no ma lags
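    # Editor's sketch: ar23ns is deliberately explosive (its first variable
    # follows y_t = 1.9*y_{t-1} - 0.3*y_{t-2}, which has a root outside the
    # unit circle), so at least one modulus printed here should exceed 1.
    print(np.abs(vp2.areigenvalues))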
| bsd-3-clause |
sonnerm/games | games/skatai.py | 1 | 1235 | __author__ = 'michael'
from games import player, skat
class MauernderRandomPlayer(player.Player):
    '''
    just folds in the beginning, afterwards plays randomly. Usually better
    than a completely randomized player
    '''
def move(self, moveview):
if skat.SkatCalls.Fold in moveview.get_options():
return skat.SkatCalls.Fold
return moveview.get_options().sample()
class GeneAI(player.Player):
'''
Not implemented yet
'''
    def __init__(self, chromosome, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reset()
        self.chromosome = chromosome
    def reset(self):
        self.hand = []
        # a separate flag: assigning to self.reset would shadow and
        # permanently replace this bound method after the first call
        self.needs_setup = True
    def calculate_game(self, hand):
        self.get_fehl()  # get_fehl is not defined yet; the class is a stub
    def move(self, moveview):
        if self.needs_setup:
            self.needs_setup = False
            self.calculate_game(moveview.hand)
if isinstance(moveview, skat.CallView):
if moveview.next_bid<=self.max_bid:
return moveview.next_bid
return skat.SkatCalls.Fold
elif isinstance(moveview, skat.RespondView):
if moveview.current_bid<=self.max_bid:
return skat.SkatCalls.Accept
return skat.SkatCalls.Fold
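# Editor's sketch (hypothetical wiring; the chromosome encoding above is not
# implemented): one plausible way to give GeneAI the max_bid that move()
# reads would be
#
#     ai = GeneAI(chromosome={'max_bid': 18})
#     ai.max_bid = ai.chromosome['max_bid']
#
# both the dict layout and the hand-off are assumptions for illustration.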
| agpl-3.0 |
Pretio/boto | boto/ec2/autoscale/launchconfig.py | 135 | 10807 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.elb.listelement import ListElement
# Namespacing issue with deprecated local class
from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
from boto.resultset import ResultSet
import boto.utils
import base64
# this should use the corresponding object from boto.ec2
# Currently in use by deprecated local BlockDeviceMapping class
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
self.snapshot_id = snapshot_id
self.volume_size = volume_size
def __repr__(self):
return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'SnapshotId':
self.snapshot_id = value
elif name == 'VolumeSize':
self.volume_size = value
class InstanceMonitoring(object):
def __init__(self, connection=None, enabled='false'):
self.connection = connection
self.enabled = enabled
def __repr__(self):
return 'InstanceMonitoring(%s)' % self.enabled
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
self.enabled = value
# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
# Currently in use by deprecated code for backwards compatibility
# Removing this class can also remove the Ebs class in this same file
class BlockDeviceMapping(object):
def __init__(self, connection=None, device_name=None, virtual_name=None,
ebs=None, no_device=None):
self.connection = connection
self.device_name = device_name
self.virtual_name = virtual_name
self.ebs = ebs
self.no_device = no_device
def __repr__(self):
return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
self.ebs = Ebs(self)
return self.ebs
def endElement(self, name, value, connection):
if name == 'DeviceName':
self.device_name = value
elif name == 'VirtualName':
self.virtual_name = value
elif name == 'NoDevice':
self.no_device = bool(value)
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None, ebs_optimized=False,
associate_public_ip_address=None, volume_type=None,
delete_on_termination=True, iops=None,
use_block_device_types=False, classic_link_vpc_id=None,
classic_link_vpc_security_groups=None):
"""
A launch configuration.
:type name: str
:param name: Name of the launch configuration to create.
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
        :param security_groups: Names or security group IDs of the security
groups with which to associate the EC2 instances or VPC instances,
respectively.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
:type instance_type: str
:param instance_type: The instance type
:type kernel_id: str
:param kernel_id: Kernel id for instance
:type ramdisk_id: str
:param ramdisk_id: RAM disk id for instance
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
with detailed monitoring.
:type spot_price: float
:param spot_price: The spot price you are bidding. Only applies
if you are building an autoscaling group with spot instances.
:type instance_profile_name: string
:param instance_profile_name: The name or the Amazon Resource
Name (ARN) of the instance profile associated with the IAM
role for the instance.
:type ebs_optimized: bool
:param ebs_optimized: Specifies whether the instance is optimized
for EBS I/O (true) or not (false).
:type associate_public_ip_address: bool
:param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
            Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
:type classic_link_vpc_id: str
:param classic_link_vpc_id: ID of ClassicLink enabled VPC.
:type classic_link_vpc_security_groups: list
:param classic_link_vpc_security_groups: Security group
            IDs of the security groups with which to associate the
ClassicLink VPC instances.
"""
self.connection = connection
self.name = name
self.instance_type = instance_type
self.block_device_mappings = block_device_mappings
self.key_name = key_name
sec_groups = security_groups or []
self.security_groups = ListElement(sec_groups)
self.image_id = image_id
self.ramdisk_id = ramdisk_id
self.created_time = None
self.kernel_id = kernel_id
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
self.spot_price = spot_price
self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.volume_type = volume_type
self.delete_on_termination = delete_on_termination
self.iops = iops
self.use_block_device_types = use_block_device_types
self.classic_link_vpc_id = classic_link_vpc_id
classic_link_vpc_sec_groups = classic_link_vpc_security_groups or []
self.classic_link_vpc_security_groups = \
ListElement(classic_link_vpc_sec_groups)
if connection is not None:
self.use_block_device_types = connection.use_block_device_types
def __repr__(self):
return 'LaunchConfiguration:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'SecurityGroups':
return self.security_groups
elif name == 'ClassicLinkVPCSecurityGroups':
return self.classic_link_vpc_security_groups
elif name == 'BlockDeviceMappings':
if self.use_block_device_types:
self.block_device_mappings = BDM()
else:
self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
return self.instance_monitoring
def endElement(self, name, value, connection):
if name == 'InstanceType':
self.instance_type = value
elif name == 'LaunchConfigurationName':
self.name = value
elif name == 'KeyName':
self.key_name = value
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
try:
self.user_data = base64.b64decode(value)
except TypeError:
self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
elif name == 'SpotPrice':
self.spot_price = float(value)
elif name == 'IamInstanceProfile':
self.instance_profile_name = value
        elif name == 'EbsOptimized':
            self.ebs_optimized = value.lower() == 'true'
        elif name == 'AssociatePublicIpAddress':
            self.associate_public_ip_address = value.lower() == 'true'
        elif name == 'VolumeType':
            self.volume_type = value
        elif name == 'DeleteOnTermination':
            self.delete_on_termination = value.lower() == 'true'
elif name == 'Iops':
self.iops = int(value)
elif name == 'ClassicLinkVPCId':
self.classic_link_vpc_id = value
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
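    # A hedged usage sketch (editor's note): region name, AMI id and key name
    # below are placeholders; the calls are the usual boto2 autoscale API.
    #
    #   import boto.ec2.autoscale
    #   conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    #   lc = LaunchConfiguration(name='my-lc', image_id='ami-12345678',
    #                            key_name='my-key', instance_type='m1.small')
    #   conn.create_launch_configuration(lc)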
| mit |
headedhorseman101/ACE3 | tools/stringtablemerger.py | 52 | 4379 | #!/usr/bin/env python3
import os
import sys
import re
from xml.dom import minidom
# STRINGTABLE MERGER TOOL
# Author: KoffeinFlummi
# --------------------------
# Automatically merges all stringtable entries
# in the given language from the given dir.
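# Example invocation (editor's note; the language argument must match the
# stringtable tag name, e.g. <German>, and the path is a placeholder):
#   python3 tools/stringtablemerger.py ../ACE3-translations German --breakdown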
def get_modules(projectpath):
""" Get all the modules of the project. """
modules = []
for i in os.listdir(projectpath):
path = os.path.join(projectpath, i)
if not os.path.isdir(path):
continue
if i[0] == ".":
continue
modules.append(i)
return modules
def contains_language(key, language):
    """ Checks whether a given key contains a certain language. """
    for child in key.childNodes:
        # text nodes have no tagName attribute, hence the getattr guard
        if getattr(child, "tagName", None) == language:
            return True
    return False
def get_entry_by_id(keys, keyid):
""" Returns the first child of keys with ID='keyid'. """
for key in keys:
if key.getAttribute("ID") == keyid:
return key
return False
def replace_entries(oldpath, newpath, language, breakdown):
""" Replaces all new entries of the given language in the given module. """
oldfile = minidom.parse(oldpath)
newfile = minidom.parse(newpath)
oldkeys = oldfile.getElementsByTagName("Key")
newkeys = newfile.getElementsByTagName("Key")
newkeys = list(filter(lambda x: contains_language(x, language), newkeys))
for newkey in newkeys:
keyid = newkey.getAttribute("ID")
oldkey = get_entry_by_id(oldkeys, keyid)
if not oldkey:
continue
if breakdown:
print(" Merging %s translation for %s" % (language, keyid))
newentry = newkey.getElementsByTagName(language)[0].firstChild
try:
# An entry for this language already exists, overwrite it
oldentry = oldkey.getElementsByTagName(language)[0].firstChild
oldentry.replaceWholeText(newentry.wholeText)
except:
# There is no entry for this language yet, make one
oldentry = oldfile.createElement(language)
oldentry.appendChild(oldfile.createTextNode(newentry.wholeText))
# Some whitespace tetris to maintain file structure
oldkey.insertBefore(oldfile.createTextNode("\n "), oldkey.lastChild)
oldkey.insertBefore(oldentry, oldkey.lastChild)
# Make a nice string
xmlstring = oldfile.toxml()
xmlstring = xmlstring.replace('" ?>', '" encoding="utf-8"?>')
# Replace the newlines that minidom swallows
xmlstring = xmlstring.replace("><", ">\n<")
xmlstring += "\n"
fhandle = open(oldpath, "w")
fhandle.write(xmlstring)
fhandle.close()
return len(newkeys)
def main(sourcepath, language, breakdown):
scriptpath = os.path.realpath(__file__)
projectpath = os.path.dirname(os.path.dirname(scriptpath))
projectpath = os.path.join(projectpath, "addons")
modules = get_modules(projectpath)
modulecounter = 0
keycounter = 0
for module in modules:
oldpath = os.path.join(projectpath, module, "stringtable.xml")
newpath = os.path.join(sourcepath, module, "stringtable.xml")
# Some translators extract the lowercase PBOs, so the module name might
# be lowercase (obviously only matters on Linux)
if not os.path.exists(newpath):
newpath = os.path.join(sourcepath, module.lower(), "stringtable.xml")
# Translator didn't include this module, skip
if not os.path.exists(newpath):
continue
keynum = replace_entries(oldpath, newpath, language, breakdown)
modulecounter += 1
keycounter += keynum
print("# Merged %i entry/entries in %s" % (keynum, module))
if breakdown:
print("")
print("")
print("# Merged %i entry/entries in %i modules" % (keycounter, modulecounter))
if __name__ == "__main__":
try:
sourcepath = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1]))
language = sys.argv[2]
assert(os.path.exists(sourcepath))
except:
print("ERROR: Missing arguments of invalid path.")
print("\nUsage:")
print("[script] [path to new project] [language]")
sys.exit(1)
main(sourcepath, language, "--breakdown" in sys.argv)
| gpl-2.0 |
chouseknecht/ansible | lib/ansible/galaxy/login.py | 12 | 4798 | ########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
from ansible import context
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import input
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
self._validate_certs = not context.CLIARGS['ignore_certs']
if github_token is None:
self.get_credentials()
def get_credentials(self):
display.display(u'\n\n' + "We need your " + stringc("GitHub login", 'bright cyan') +
" to identify you.", screen_only=True)
display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') +
", only to " + stringc("api.github.com.", "yellow"), screen_only=True)
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token", 'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = input("GitHub Username: ")
except Exception:
pass
try:
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
except Exception:
pass
if not self.github_username or not self.github_password:
raise AnsibleError("Invalid GitHub credentials. Username and password are required.")
def remove_github_token(self):
'''
If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
retrieve the token after creation, so we are forced to create a new one.
'''
try:
tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True,
validate_certs=self._validate_certs))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
for token in tokens:
if token['note'] == 'ansible-galaxy login':
display.vvvvv('removing token: %s' % token['token_last_eight'])
try:
open_url('https://api.github.com/authorizations/%d' % token['id'],
url_username=self.github_username, url_password=self.github_password, method='DELETE',
force_basic_auth=True, validate_certs=self._validate_certs)
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
def create_github_token(self):
'''
Create a personal authorization token with a note of 'ansible-galaxy login'
'''
self.remove_github_token()
args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"})
try:
data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True, data=args,
validate_certs=self._validate_certs))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
return data['token']
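    # Editor's sketch of the expected caller (names mirror ansible's
    # GalaxyCLI login flow and are illustrative, not verbatim):
    #
    #   login = GalaxyLogin(self.galaxy)
    #   github_token = login.create_github_token()
    #   galaxy_response = self.api.authenticate(github_token)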
| gpl-3.0 |