repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
wbc2010/django1.2.5 | django1.2.5/tests/regressiontests/cache/tests.py | 38 | 28208 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import os
import tempfile
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import get_cache
from django.core.cache.backends.base import CacheKeyWarning
from django.http import HttpResponse, HttpRequest
from django.middleware.cache import FetchFromCacheMiddleware, UpdateCacheMiddleware
from django.test import TestCase
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import translation
from django.utils.cache import patch_vary_headers, get_cache_key, learn_cache_key
from django.utils.hashcompat import md5_constructor
from regressiontests.cache.models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    """Module-level function fixture: anything callable that can be cached."""
    return 6 * 7
class C:
    """Class fixture used by the complex-data-type cache tests."""

    def m(n):
        # NOTE: 'n' receives the instance (plays the role of 'self');
        # the method ignores it and returns a constant.
        return 4 * 6
class DummyCacheTests(unittest.TestCase):
    """Tests for the 'dummy://' backend, which silently discards everything."""
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has different test requirements.
    def setUp(self):
        # A fresh dummy cache for every test.
        self.cache = get_cache('dummy://')

    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), None)

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        # add() reports success even though nothing is stored.
        self.assertEqual(result, True)
        self.assertEqual(self.cache.get("addkey1"), None)

    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), None)
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), False)
        self.assertEqual(self.cache.has_key("goodbye1"), False)

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, False)
        self.assertEqual("goodbye2" in self.cache, False)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        self.cache.set('answer', 42)
        # incr() on a missing key raises ValueError; on the dummy backend
        # every key is effectively missing.
        self.assertRaises(ValueError, self.cache.incr, 'answer')
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        self.cache.set('answer', 42)
        self.assertRaises(ValueError, self.cache.decr, 'answer')
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), None)

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), None)
        self.assertEqual(self.cache.has_key("expire3"), False)

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        # NOTE(review): the u'ascii' key appears twice, so the second entry
        # overwrites the first in this dict literal — presumably unintended,
        # but harmless for a backend that ignores everything.
        stuff = {
            u'ascii': u'ascii_value',
            u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
            u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
            u'ascii': {u'x': 1}
        }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), None)

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        self.cache.set_many({'a': 1, 'b': 2})

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        self.cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        self.cache.clear()
class BaseCacheTests(object):
    # A common set of tests to apply to all cache backends.
    # Subclasses mix this in together with unittest.TestCase and provide
    # self.cache in their own setUp().
    def tearDown(self):
        self.cache.clear()

    def test_simple(self):
        # Simple cache set/get works
        self.cache.set("key", "value")
        self.assertEqual(self.cache.get("key"), "value")

    def test_add(self):
        # A key can be added to a cache; a second add() of the same key
        # must fail and leave the original value in place.
        self.cache.add("addkey1", "value")
        result = self.cache.add("addkey1", "newvalue")
        self.assertEqual(result, False)
        self.assertEqual(self.cache.get("addkey1"), "value")

    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertEqual(self.cache.get("does_not_exist"), None)
        self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        # Multiple cache keys can be returned using get_many;
        # missing keys are simply absent from the result dict.
        self.cache.set('a', 'a')
        self.cache.set('b', 'b')
        self.cache.set('c', 'c')
        self.cache.set('d', 'd')
        self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
        self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})

    def test_delete(self):
        # Cache keys can be deleted
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.assertEqual(self.cache.get("key1"), "spam")
        self.cache.delete("key1")
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), "eggs")

    def test_has_key(self):
        # The cache can be inspected for cache keys
        self.cache.set("hello1", "goodbye1")
        self.assertEqual(self.cache.has_key("hello1"), True)
        self.assertEqual(self.cache.has_key("goodbye1"), False)

    def test_in(self):
        # The in operator can be used to inspect cache contents
        self.cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in self.cache, True)
        self.assertEqual("goodbye2" in self.cache, False)

    def test_incr(self):
        # Cache values can be incremented
        self.cache.set('answer', 41)
        self.assertEqual(self.cache.incr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.incr('answer', 10), 52)
        self.assertEqual(self.cache.get('answer'), 52)
        self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')

    def test_decr(self):
        # Cache values can be decremented
        self.cache.set('answer', 43)
        self.assertEqual(self.cache.decr('answer'), 42)
        self.assertEqual(self.cache.get('answer'), 42)
        self.assertEqual(self.cache.decr('answer', 10), 32)
        self.assertEqual(self.cache.get('answer'), 32)
        self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')

    def test_data_types(self):
        # Many different data types can be cached (anything picklable).
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        self.cache.set("stuff", stuff)
        self.assertEqual(self.cache.get("stuff"), stuff)

    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        self.cache.set('question', my_poll)
        cached_poll = self.cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        self.cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cached_polls = self.cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)

    def test_expiration(self):
        # Cache values can be set to expire
        self.cache.set('expire1', 'very quickly', 1)
        self.cache.set('expire2', 'very quickly', 1)
        self.cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("expire1"), None)
        # add() succeeds once the old entry has expired.
        self.cache.add("expire2", "newvalue")
        self.assertEqual(self.cache.get("expire2"), "newvalue")
        self.assertEqual(self.cache.has_key("expire3"), False)

    def test_unicode(self):
        # Unicode values can be cached
        # NOTE(review): the u'ascii' key appears twice in this literal, so
        # the dict value replaces the string value — presumably unintended.
        stuff = {
            u'ascii': u'ascii_value',
            u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
            u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
            u'ascii': {u'x': 1}
        }
        for (key, value) in stuff.items():
            self.cache.set(key, value)
            self.assertEqual(self.cache.get(key), value)

    def test_binary_string(self):
        # Binary strings should be cachable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value)
        self.cache.set('binary1', compressed_value)
        compressed_result = self.cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result))

    def test_set_many(self):
        # Multiple keys can be set using set_many
        self.cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(self.cache.get("key1"), "spam")
        self.assertEqual(self.cache.get("key2"), "eggs")

    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.set("key3", "ham")
        self.cache.delete_many(["key1", "key2"])
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)
        self.assertEqual(self.cache.get("key3"), "ham")

    def test_clear(self):
        # The cache can be emptied using clear
        self.cache.set("key1", "spam")
        self.cache.set("key2", "eggs")
        self.cache.clear()
        self.assertEqual(self.cache.get("key1"), None)
        self.assertEqual(self.cache.get("key2"), None)

    def test_long_timeout(self):
        '''
        Using a timeout greater than 30 days makes memcached think
        it is an absolute expiration timestamp instead of a relative
        offset. Test that we honour this convention. Refs #12399.
        '''
        self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
        self.assertEqual(self.cache.get('key1'), 'eggs')
        self.cache.add('key2', 'ham', 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key2'), 'ham')
        self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
        self.assertEqual(self.cache.get('key3'), 'sausage')
        self.assertEqual(self.cache.get('key4'), 'lobster bisque')

    def perform_cull_test(self, initial_count, final_count):
        """This is implemented as a utility method, because only some of the backends
        implement culling. The culling algorithm also varies slightly, so the final
        number of entries will vary between backends"""
        # Create initial cache key entries. This will overflow the cache, causing a cull
        # (range(1, n) creates n-1 entries).
        for i in range(1, initial_count):
            self.cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if self.cache.has_key('cull%d' % i):
                count = count + 1
        self.assertEqual(count, final_count)

    def test_invalid_keys(self):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # On Python 2.6+ we could use the catch_warnings context
        # manager to test this warning nicely. Since we can't do that
        # yet, the cleanest option is to temporarily ask for
        # CacheKeyWarning to be raised as an exception.
        _warnings_state = get_warnings_state()
        warnings.simplefilter("error", CacheKeyWarning)
        try:
            # memcached does not allow whitespace or control characters in keys
            self.assertRaises(CacheKeyWarning, self.cache.set, 'key with spaces', 'value')
            # memcached limits key length to 250
            self.assertRaises(CacheKeyWarning, self.cache.set, 'a' * 251, 'value')
        finally:
            # Always restore the global warnings filters.
            restore_warnings_state(_warnings_state)
class DBCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common backend tests against the database cache backend."""

    def setUp(self):
        # Spaces are used in the table name to ensure quoting/escaping is working
        self._table_name = 'test cache table'
        management.call_command('createcachetable', self._table_name,
                                verbosity=0, interactive=False)
        self.cache = get_cache('db://%s?max_entries=30' % self._table_name)

    def tearDown(self):
        # Drop the cache table created in setUp (name needs quoting).
        from django.db import connection
        connection.cursor().execute(
            'DROP TABLE %s' % connection.ops.quote_name(self._table_name))

    def test_cull(self):
        self.perform_cull_test(50, 29)
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
    """Run the common backend tests against the local-memory backend."""
    def setUp(self):
        # Small max_entries so perform_cull_test overflows the cache.
        self.cache = get_cache('locmem://?max_entries=30')

    def test_cull(self):
        self.perform_cull_test(50, 29)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain a CACHE_BACKEND setting that points at
# your memcache server.
if settings.CACHE_BACKEND.startswith('memcached://'):
    class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
        """Run the common backend tests against a live memcached server."""
        def setUp(self):
            self.cache = get_cache(settings.CACHE_BACKEND)

        def test_invalid_keys(self):
            """
            On memcached, we don't introduce a duplicate key validation
            step (for speed reasons), we just let the memcached API
            library raise its own exception on bad keys. Refs #6447.
            In order to be memcached-API-library agnostic, we only assert
            that a generic exception of some kind is raised.
            """
            # memcached does not allow whitespace or control characters in keys
            self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
            # memcached limits key length to 250
            self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
    """
    Specific test cases for the file-based cache.
    """
    def setUp(self):
        self.dirname = tempfile.mkdtemp()
        self.cache = get_cache('file://%s?max_entries=30' % self.dirname)

    def tearDown(self):
        # Fix: the directory created by tempfile.mkdtemp() was previously
        # never removed, leaking one temp tree per test. Clear the cache
        # first (BaseCacheTests behaviour), then delete the directory.
        self.cache.clear()
        import shutil
        shutil.rmtree(self.dirname, ignore_errors=True)

    def test_hashing(self):
        """Test that keys are hashed into subdirectories correctly"""
        self.cache.set("foo", "bar")
        keyhash = md5_constructor("foo").hexdigest()
        # Layout is <root>/<hash[0:2]>/<hash[2:4]>/<hash[4:]>.
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assert_(os.path.exists(keypath))

    def test_subdirectory_removal(self):
        """
        Make sure that the created subdirectories are correctly removed when empty.
        """
        self.cache.set("foo", "bar")
        keyhash = md5_constructor("foo").hexdigest()
        keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
        self.assert_(os.path.exists(keypath))
        # Deleting the only entry should prune both hash subdirectories.
        self.cache.delete("foo")
        self.assert_(not os.path.exists(keypath))
        self.assert_(not os.path.exists(os.path.dirname(keypath)))
        self.assert_(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))

    def test_cull(self):
        self.perform_cull_test(50, 28)
class CustomCacheKeyValidationTests(unittest.TestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        cache = get_cache('regressiontests.cache.liberal_backend://')
        # A key that is both longer than 250 characters and contains
        # spaces — the liberal backend must accept it without warning.
        key = 'some key with spaces' * 15
        value = 'a value'
        cache.set(key, value)
        self.assertEqual(cache.get(key), value)
class CacheUtils(unittest.TestCase):
    """TestCase for django.utils.cache functions."""
    def setUp(self):
        # Save the settings mutated by these tests so tearDown can restore them.
        self.path = '/cache/test/'
        self.old_settings_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.old_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
        self.orig_use_i18n = settings.USE_I18N
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
        settings.CACHE_MIDDLEWARE_SECONDS = 1
        settings.USE_I18N = False

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.old_settings_key_prefix
        settings.CACHE_MIDDLEWARE_SECONDS = self.old_middleware_seconds
        settings.USE_I18N = self.orig_use_i18n

    def _get_request(self, path):
        # Build a minimal GET-style request for cache-key computation.
        # NOTE(review): with self.path this yields '/cache//cache/test/'
        # (the prefix is applied twice) — presumably intentional, since the
        # expected hashes below were computed against that path; verify.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = "/cache/%s" % path
        return request

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
        # Verify that a specified key_prefix is taken in to account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')

    def test_learn_cache_key(self):
        request = self._get_request(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
class CacheI18nTest(unittest.TestCase):
    """Tests that cache keys and the cache middleware honour USE_I18N."""
    def setUp(self):
        # Save every setting this test mutates so tearDown can restore it.
        self.orig_cache_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
        self.orig_cache_middleware_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.orig_cache_backend = settings.CACHE_BACKEND
        self.orig_use_i18n = settings.USE_I18N
        self.orig_languages = settings.LANGUAGES
        settings.LANGUAGES = (
            ('en', 'English'),
            ('es', 'Spanish'),
        )
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
        self.path = '/cache/test/'

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_SECONDS = self.orig_cache_middleware_seconds
        settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.orig_cache_middleware_key_prefix
        settings.CACHE_BACKEND = self.orig_cache_backend
        settings.USE_I18N = self.orig_use_i18n
        settings.LANGUAGES = self.orig_languages
        # Reset the active translation so tests don't leak language state.
        translation.deactivate()

    def _get_request(self):
        # Minimal request for cache-key computation.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = self.path
        return request

    def _get_request_cache(self):
        # Like _get_request, but shaped for the cache middleware
        # (needs method, session and the _cache_update_cache flag).
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = self.path
        request._cache_update_cache = True
        request.method = 'GET'
        request.session = {}
        return request

    def test_cache_key_i18n(self):
        settings.USE_I18N = True
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertTrue(key.endswith(lang), "Cache keys should include the language name when i18n is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    def test_cache_key_no_i18n (self):
        settings.USE_I18N = False
        request = self._get_request()
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertFalse(key.endswith(lang), "Cache keys shouldn't include the language name when i18n is inactive")

    def test_middleware(self):
        def set_cache(request, lang, msg):
            # Store msg in the cache under the given active language.
            translation.activate(lang)
            response = HttpResponse()
            response.content= msg
            return UpdateCacheMiddleware().process_response(request, response)

        settings.CACHE_MIDDLEWARE_SECONDS = 60
        settings.CACHE_MIDDLEWARE_KEY_PREFIX="test"
        settings.CACHE_BACKEND='locmem:///'
        settings.USE_I18N = True
        en_message ="Hello world!"
        es_message ="Hola mundo!"
        request = self._get_request_cache()
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # Check that we can recover the cache
        self.assertNotEqual(get_cache_data.content, None)
        self.assertEqual(en_message, get_cache_data.content)
        # change the session language and set content
        request = self._get_request_cache()
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message)
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message)
class CacheMiddlewareAnonymousOnlyTests(TestCase):
    urls = 'regressiontests.cache.urls'

    def setUp(self):
        # The setting may be absent entirely, hence getattr with a default.
        self._orig_cache_middleware_anonymous_only = \
            getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
        self._orig_middleware_classes = settings.MIDDLEWARE_CLASSES
        # Copy before mutating so the original tuple/list is untouched.
        settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
        settings.MIDDLEWARE_CLASSES.insert(0, 'django.middleware.cache.UpdateCacheMiddleware')
        settings.MIDDLEWARE_CLASSES += ['django.middleware.cache.FetchFromCacheMiddleware']

    def tearDown(self):
        settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = self._orig_cache_middleware_anonymous_only
        settings.MIDDLEWARE_CLASSES = self._orig_middleware_classes

    def test_cache_middleware_anonymous_only_does_not_cause_vary_cookie(self):
        settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
        response = self.client.get('/')
        self.failIf('Cookie' in response.get('Vary', ''))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
datakortet/django-cms | cms/apphook_pool.py | 6 | 2605 | # -*- coding: utf-8 -*-
from cms.exceptions import AppAlreadyRegistered
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import load, iterload_objects
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import warnings
class ApphookPool(object):
    """Registry of CMS apphook classes, keyed by class name."""

    def __init__(self):
        self.apps = {}              # name -> CMSApp subclass
        self.discovered = False     # lazily populated by discover_apps()
        self.block_register = False # gate used while loading configured hooks

    def discover_apps(self):
        """Populate the pool from the APPHOOKS setting, or by loading each
        installed app's ``cms_app`` module. Idempotent."""
        if self.discovered:
            return
        #import all the modules
        apphooks = get_cms_setting('APPHOOKS')
        if apphooks:
            # When APPHOOKS is set, only the explicitly listed classes may
            # register: the gate is opened per class and closed again so
            # that imports triggered as a side effect cannot register.
            self.block_register = True
            for cls in iterload_objects(apphooks):
                self.block_register = False
                self.register(cls)
                self.block_register = True
            self.block_register = False
        else:
            load('cms_app')
        self.discovered = True

    def clear(self):
        """Empty the pool and force rediscovery on next access."""
        self.apps = {}
        self.discovered = False

    def register(self, app):
        """Validate and add an apphook class; no-op while registration is blocked.

        Raises ImproperlyConfigured for non-CMSApp classes and
        AppAlreadyRegistered for duplicate class names.
        """
        if self.block_register:
            return
        from cms.app_base import CMSApp
        # validate the app
        if not issubclass(app, CMSApp):
            raise ImproperlyConfigured('CMS Apps must inherit '
                                       'cms.app_base.CMSApp, %r does not' % app)
        if hasattr(app, 'menu') and not app.menus:
            # Bug fix: the message's %r placeholder was never filled in
            # (warnings.warn was called without the '% app' argument).
            warnings.warn("You define a 'menu' attribute on your CMS App %r, "
                          "but the 'menus' attribute is empty, "
                          "did you make a typo?" % app)
        name = app.__name__
        if name in self.apps:
            # py3-compatible raise (was py2-only ``raise X, msg`` syntax).
            raise AppAlreadyRegistered(
                "[%s] a cms app with this name is already registered" % name)
        self.apps[name] = app

    def get_apphooks(self):
        """Return [(class_name, verbose_name), ...] sorted by verbose name."""
        self.discover_apps()
        hooks = []
        for app_name in self.apps.keys():
            app = self.apps[app_name]
            hooks.append((app_name, app.name))
        # Unfortunately, we loose the ordering since we now have a list of tuples. Let's reorder by app_name:
        hooks = sorted(hooks, key=lambda hook: hook[1])
        return hooks

    def get_apphook(self, app_name):
        """Look up an apphook by class name (or, deprecated, by urlconf name)."""
        self.discover_apps()
        try:
            return self.apps[app_name]
        except KeyError:
            # deprecated: return apphooks registered in db with urlconf name instead of apphook class name
            for app in self.apps.values():
                if app_name in app.urls:
                    return app
        raise ImproperlyConfigured('No registered apphook `%s` found.' % app_name)


apphook_pool = ApphookPool()
| bsd-3-clause |
sfjuocekr/PokeIV | pokeIV.py | 1 | 7510 | #!/usr/bin/env python
#This software uses pgoapi - see pgoapi/LICENSE.txt
import os
import re
import sys
import json
import time
import struct
import pprint
import logging
import requests
import argparse
import getpass
import csv
import time
from tkinter import ttk
import tkinter as tk
from collections import OrderedDict
from pokemondata import PokemonData
from pokeivwindow import PokeIVWindow
# add directory of this file to PATH, so that the package will be found
try:
root = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
except NameError:
root = os.path.normpath(os.path.dirname(os.path.realpath(sys.argv[0])))
sys.path.append(os.path.dirname(root))
# import Pokemon Go API lib
from pgoapi import pgoapi
from pgoapi import utilities as util
# other stuff
from google.protobuf.internal import encoder
from geopy.geocoders import GoogleV3
from s2sphere import Cell, CellId, LatLng
log = logging.getLogger(__name__)
def setupLogger():
    """Configure root logging to stdout and quiet down chatty libraries."""
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format='%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s',
    )
    # Third-party loggers are noisy at DEBUG; cap them individually.
    for noisy_name, noisy_level in (("requests", logging.WARNING),
                                    ("pgoapi", logging.INFO),
                                    ("rpc_api", logging.INFO)):
        logging.getLogger(noisy_name).setLevel(noisy_level)
def init_config():
    """Parse command-line options, overlaying defaults from config.json.

    Returns an OrderedDict of option name -> value (sorted by name), or
    None when the combination of options is invalid (an error is logged).
    """
    parser = argparse.ArgumentParser()
    config_file = "config.json"

    # Load file-based defaults when config.json exists next to the script.
    load = {}
    if os.path.isfile(config_file):
        with open(config_file) as data:
            load.update(json.load(data))
    # (Removed dead code: an unused ``required`` lambda was defined here.)

    parser.add_argument("-a", "--auth_service", help="Auth Service ('ptc' or 'google')")
    parser.add_argument("-u", "--username", help="Username")
    parser.add_argument("-p", "--password", help="Password")
    parser.add_argument("-l", "--location", help="Physical location of your character")
    parser.add_argument("-m", "--minimumIV", help="All pokemon equal to or above this IV value are kept regardless of duplicates")
    parser.add_argument("-me", "--max_evolutions", help="Maximum number of evolutions in one pass")
    parser.add_argument("-ed", "--evolution_delay", help="delay between evolutions in seconds")
    parser.add_argument("-td", "--transfer_delay", help="delay between transfers in seconds")
    parser.add_argument("-rd", "--rename_delay", help="delay between renames in seconds")
    parser.add_argument("-ud", "--upgrade_delay", help="delay between upgrades in seconds")
    parser.add_argument("-hm", "--hard_minimum", help="transfer candidates will be selected if they are below minimumIV (will transfer unique pokemon)", action="store_true")
    parser.add_argument("-cp", "--cp_override", help="will keep pokemon that have CP equal to or above the given limit, regardless of IV")
    parser.add_argument("-v", "--verbose", help="displays additional information about each pokemon", action="store_true")
    parser.add_argument("-el", "--evolve_list", help="Evolve lsit has been deprecated. Please use white list instead (-wl).", action="append")
    parser.add_argument("-wl", "--white_list", help="list of the only pokemon to transfer and evolve by ID or name (ex: -wl 1 = -wl bulbasaur)", action="append")
    parser.add_argument("-bl", "--black_list", help="list of the pokemon not to transfer and evolve by ID or name (ex: -bl 1 = -bl bulbasaur)", action="append")
    parser.add_argument("-f", "--force", help="forces all pokemon not passing the IV threshold to be transfer candidates regardless of evolution", action="store_true")
    parser.add_argument("-rf", "--rename_format", help="The pokemon renaming format. See config comments")
    parser.add_argument("-eq", "--equation", help="Equation to use for IV calculation--see config file for details")
    parser.add_argument("-dn", "--display_nickname", help="Display nicknames instead of pokemon type", action="store_true")
    parser.add_argument("-la", "--language", help="Pokemon names are displayed in the given language. German and English currently supported")
    config = parser.parse_args()

    # Command-line values win; otherwise fall back to config.json values.
    for key in config.__dict__:
        if key in load and config.__dict__[key] is None and load[key]:
            if key == "black_list" or key == "white_list":
                # List options are stored comma-separated in the file.
                config.__dict__[key] = str(load[key]).split(',')
            else:
                config.__dict__[key] = str(load[key])
        elif key in load and (type(config.__dict__[key]) == type(True)) and not config.__dict__[key] and load[key]:
            # Boolean flags default to False; honour a "True" string in the file.
            if str(load[key]) == "True":
                config.__dict__[key] = True

    # Hard defaults for options still unset after both sources.
    for key, default in (("minimumIV", "101"),
                         ("max_evolutions", "71"),
                         ("evolution_delay", "25"),
                         ("transfer_delay", "10")):
        if config.__dict__[key] is None:
            config.__dict__[key] = default

    if config.white_list is not None and config.black_list is not None:
        logging.error("Black list and white list can not be used together.")
        return
    if config.evolve_list is not None:
        logging.error("Evolve list has been deprecated. Please use white list instead (-wl).")
        return
    if config.white_list is not None:
        config.white_list = [x.lower() for x in config.white_list]
    if config.black_list is not None:
        config.black_list = [x.lower() for x in config.black_list]

    return OrderedDict(sorted(vars(config).items()))
def main():
    """Entry point: set up logging, build the config, then launch the app."""
    setupLogger()
    log.debug('Logger set up')
    #-- initialize config
    config = init_config()
    if not config:
        return
    # Auto-login only when a complete set of credentials was supplied.
    have_credentials = (config["username"] is not None
                        and config["password"] is not None
                        and config["auth_service"] in ['ptc', 'google']
                        and config["location"] is not None)
    start(config, login=have_credentials)
def start(config, login=False):
    """Load static pokemon data from disk and launch the Tk GUI.

    config: OrderedDict of settings produced by init_config().
    login:  when True, the UI is started with live-login behavior enabled.
    """
    # -- dictionaries for pokedex, families, and evolution prices
    # JSON data files live next to the script (``root``); the TSV files are
    # opened relative to the current working directory — NOTE(review):
    # inconsistent base paths, verify this is intentional.
    with open(os.path.normpath(os.path.join(root, 'pokemon.json'))) as f:
        pokemonInfo = json.load(f)
    with open(os.path.normpath(os.path.join(root, 'moves.json'))) as f:
        moveInfo = json.load(f)
    with open(os.path.normpath(os.path.join(root, 'types.json'))) as f:
        types = json.load(f)
    # Each TSV has a header row, skipped via readline(), then two columns
    # which dict(csv.reader(...)) turns into a key->value mapping.
    with open('german-names.tsv') as f:
        f.readline()
        german = dict(csv.reader(f, delimiter='\t'))
    with open('families.tsv') as f:
        f.readline()
        family = dict(csv.reader(f, delimiter='\t'))
    with open('evolves.tsv') as f:
        f.readline()
        cost = dict(csv.reader(f, delimiter='\t'))
    # Index pokemon by their numeric id; moves by id with type/name payload.
    pokedex = dict([(int(p["Number"]),p["Name"]) for p in pokemonInfo])
    moves = dict([(int(m["id"]),{"type":m["type"],"name":m["name"]}) for m in moveInfo])
    # -- change language if selected -->
    # Replaces English names in-place; german-names.tsv is keyed by the
    # stringified pokedex number.
    if config["language"] is not None and config["language"].lower() == 'german':
        for k,v in pokedex.items():
            pokedex[k] = german[str(k)];
    # instantiate pgoapi
    api = pgoapi.PGoApi()
    data = PokemonData(pokedex, moves, types, family, cost, config, api, login)
    # Build the Tk root window and hand control to the app's event loop.
    main_window = tk.Tk()
    main_window.style = ttk.Style()
    main_window.style.theme_use("classic")
    app = PokeIVWindow(config,data,master=main_window)
    app.mainloop()
# Launch only when run as a script, so the module can be imported untouched.
if __name__ == '__main__':
    main()
| mit |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/contrib/contenttypes/generic.py | 44 | 16570 | """
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models, router
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.utils.encoding import smart_unicode
class GenericForeignKey(object):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """

    def __init__(self, ct_field="content_type", fk_field="object_id"):
        # Names of the concrete model fields that store the ContentType FK
        # and the target object's primary key, respectively.
        self.ct_field = ct_field
        self.fk_field = fk_field

    def contribute_to_class(self, cls, name):
        # Called by the model metaclass when the field is added to a model.
        self.name = name
        self.model = cls
        # Per-instance attribute used to memoize the resolved object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)
        # For some reason I don't totally understand, using weakrefs here doesn't work.
        signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
        # Connect myself as the descriptor for this field
        setattr(cls, name, self)

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            kwargs[self.ct_field] = self.get_content_type(obj=value)
            kwargs[self.fk_field] = value._get_pk_val()

    def get_content_type(self, obj=None, id=None, using=None):
        # Convenience function using get_model avoids a circular import when
        # using this model
        ContentType = get_model("contenttypes", "contenttype")
        if obj:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
        elif id:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def __get__(self, instance, instance_type=None):
        # Descriptor access on the class returns the field itself.
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None
            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    # A dangling id (target deleted) resolves to None.
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        if instance is None:
            # NOTE(review): ``self.related`` is never assigned on this class,
            # so this line would itself raise AttributeError — verify.
            raise AttributeError(u"%s must be accessed via instance" % self.related.opts.object_name)
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
        # Keep the underlying fields and the memoized object in sync.
        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(RelatedField, Field):
    """Provides an accessor to generic related objects (e.g. comments)"""

    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(to,
                            related_name=kwargs.pop('related_name', None),
                            limit_choices_to=kwargs.pop('limit_choices_to', None),
                            symmetrical=kwargs.pop('symmetrical', True))
        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
        # A generic relation stores no column of its own, so it is never
        # edited, serialized, or required.
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        Field.__init__(self, **kwargs)

    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)

    def value_to_string(self, obj):
        qs = getattr(obj, self.name).all()
        return smart_unicode([instance._get_pk_val() for instance in qs])

    # The m2m_* hooks below make the relation masquerade as a
    # ManyToManyField for the ORM's join machinery.
    def m2m_db_table(self):
        return self.rel.to._meta.db_table

    def m2m_column_name(self):
        return self.object_id_field_name

    def m2m_reverse_name(self):
        return self.rel.to._meta.pk.column

    def m2m_target_field_name(self):
        return self.model._meta.pk.name

    def m2m_reverse_target_field_name(self):
        return self.rel.to._meta.pk.name

    def contribute_to_class(self, cls, name):
        super(GenericRelation, self).contribute_to_class(cls, name)
        # Save a reference to which model this class is on for future use
        self.model = cls
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))

    def contribute_to_related_class(self, cls, related):
        # No reverse accessor is installed on the related model.
        pass

    def set_attributes_from_rel(self):
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def db_type(self, connection):
        # Since we're simulating a ManyToManyField, in effect, best return the
        # same db_type as well.
        return None

    def extra_filters(self, pieces, pos, negate):
        """
        Return an extra filter to the queryset so that the results are filtered
        on the appropriate content type.
        """
        if negate:
            return []
        ContentType = get_model("contenttypes", "contenttype")
        content_type = ContentType.objects.get_for_model(self.model)
        prefix = "__".join(pieces[:pos + 1])
        return [("%s__%s" % (prefix, self.content_type_field_name),
                 content_type)]
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """

    def __init__(self, field):
        # The GenericRelation field this descriptor serves.
        self.field = field

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # This import is done here to avoid circular import importing this module
        from django.contrib.contenttypes.models import ContentType
        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)
        qn = connection.ops.quote_name
        # A fresh manager is built per access, bound to this instance's
        # content type and primary key.
        manager = RelatedManager(
            model = rel_model,
            instance = instance,
            symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table = qn(self.field.m2m_db_table()),
            source_col_name = qn(self.field.m2m_column_name()),
            target_col_name = qn(self.field.m2m_reverse_name()),
            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance),
            content_type_field_name = self.field.content_type_field_name,
            object_id_field_name = self.field.object_id_field_name
        )
        return manager

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        # Assignment replaces the full related set: clear then re-add.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                     join_table=None, source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.core_filters = core_filters or {}
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            # The ``join_table`` argument is kept for interface compatibility
            # but the related model's own table is what is actually used.
            # (The original assigned ``join_table`` and then immediately
            # overwrote it with ``model._meta.db_table`` — a dead store.)
            self.join_table = model._meta.db_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            # Pin the owning instance's pk once; all queries filter on it.
            self.pk_val = self.instance._get_pk_val()

        def get_query_set(self):
            db = self._db or router.db_for_read(self.model, instance=self.instance)
            # Restrict to rows whose GFK points back at the owning instance.
            query = {
                '%s__pk' % self.content_type_field_name : self.content_type.id,
                '%s__exact' % self.object_id_field_name : self.pk_val,
            }
            return superclass.get_query_set(self).using(db).filter(**query)

        def add(self, *objs):
            # Point each object's GFK at the owning instance and persist it.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True

        def remove(self, *objs):
            # Generic relations have no through table, so "removing" an
            # object deletes it outright.
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in objs:
                obj.delete(using=db)
        remove.alters_data = True

        def clear(self):
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in self.all():
                obj.delete(using=db)
        clear.alters_data = True

        def create(self, **kwargs):
            # Pre-fill the GFK fields so the new object is already related.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
    """Relation metadata object attached to a GenericRelation field."""

    def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
        self.to = to
        self.related_name = related_name
        self.limit_choices_to = limit_choices_to or {}
        self.symmetrical = symmetrical
        # Generic relations are always multi-valued and never go through
        # an intermediary model.
        self.multiple = True
        self.through = None
class BaseGenericInlineFormSet(BaseModelFormSet):
    """
    A formset for generic inline objects to a parent.
    """

    def __init__(self, data=None, files=None, instance=None, save_as_new=None,
                 prefix=None, queryset=None):
        # NOTE(review): ``save_as_new`` is accepted but never used in this
        # class — verify whether callers rely on it.
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        opts = self.model._meta
        self.instance = instance
        # Unique-ish name for this relation, also used as the form prefix.
        self.rel_name = '-'.join((
            opts.app_label, opts.object_name.lower(),
            self.ct_field.name, self.ct_fk_field.name,
        ))
        if self.instance is None or self.instance.pk is None:
            # Unsaved parent: there can be no related rows yet.
            qs = self.model._default_manager.none()
        else:
            if queryset is None:
                queryset = self.model._default_manager
            # Only rows whose GFK points at the parent instance.
            qs = queryset.filter(**{
                self.ct_field.name: ContentType.objects.get_for_model(self.instance),
                self.ct_fk_field.name: self.instance.pk,
            })
        super(BaseGenericInlineFormSet, self).__init__(
            queryset=qs, data=data, files=files,
            prefix=prefix
        )

    #@classmethod
    def get_default_prefix(cls):
        # Same naming scheme as ``rel_name`` above.
        opts = cls.model._meta
        return '-'.join((opts.app_label, opts.object_name.lower(),
                        cls.ct_field.name, cls.ct_fk_field.name,
        ))
    get_default_prefix = classmethod(get_default_prefix)

    def save_new(self, form, commit=True):
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        # Seed the new object with the GFK fields pointing at the parent.
        kwargs = {
            self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
            self.ct_fk_field.get_attname(): self.instance.pk,
        }
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
                                  formset=BaseGenericInlineFormSet,
                                  ct_field="content_type", fk_field="object_id",
                                  fields=None, exclude=None,
                                  extra=3, can_order=False, can_delete=True,
                                  max_num=None,
                                  formfield_callback=lambda f: f.formfield()):
    """
    Returns a ``GenericInlineFormSet`` for the given kwargs.

    You must provide ``ct_field`` and ``fk_field`` if they are different from
    the defaults ``content_type`` and ``object_id`` respectively.
    """
    opts = model._meta
    # Avoid a circular import.
    from django.contrib.contenttypes.models import ContentType
    # if there is no field called `ct_field` let the exception propagate
    ct_field = opts.get_field(ct_field)
    if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
        # It is the content-type field being validated here; the original
        # message incorrectly blamed 'fk_name'.
        raise Exception("ct_field '%s' is not a ForeignKey to ContentType" % ct_field)
    fk_field = opts.get_field(fk_field)  # let the exception propagate
    # The two GFK component fields are managed by the formset itself, so
    # always exclude them from the generated forms.
    if exclude is not None:
        exclude = list(exclude)
        exclude.extend([ct_field.name, fk_field.name])
    else:
        exclude = [ct_field.name, fk_field.name]
    FormSet = modelformset_factory(model, form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra, can_delete=can_delete, can_order=can_order,
                                   fields=fields, exclude=exclude, max_num=max_num)
    # Stash the resolved field objects for BaseGenericInlineFormSet to use.
    FormSet.ct_field = ct_field
    FormSet.ct_fk_field = fk_field
    return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
    # Default GFK component field names; override on subclasses if the
    # inline model uses different names.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    def get_formset(self, request, obj=None):
        """Build the inline formset class for this admin, honoring
        declared fieldsets, excludes, and read-only fields."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        # An empty exclude list means "no explicit excludes" downstream.
        exclude = exclude or None
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": self.formfield_for_dbfield,
            "formset": self.formset,
            "extra": self.extra,
            "can_delete": self.can_delete,
            "can_order": False,
            "fields": fields,
            "max_num": self.max_num,
            "exclude": exclude
        }
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    # Render each inline form using the admin's stacked layout.
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    # Render inline forms as rows of a table.
    template = 'admin/edit_inline/tabular.html'
| bsd-3-clause |
myang321/django | django/utils/tree.py | 372 | 4883 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """

    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'

    def __init__(self, children=None, connector=None, negated=False):
        """
        Construct a new Node. If no connector is given, the default is used.
        """
        self.children = list(children) if children else []
        self.connector = connector if connector else self.default
        self.negated = negated

    # We need this because of django.db.models.query_utils.Q. Q.__init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    @classmethod
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        Create a new instance of this class. Used internally whenever new
        Nodes (or subclasses) are needed. Normally this just shadows
        __init__(); subclasses whose __init__ signature is not an extension
        of Node.__init__ may need to override it.
        """
        instance = Node(children, connector, negated)
        instance.__class__ = cls
        return instance

    def __str__(self):
        body = '%s: %s' % (
            self.connector, ', '.join(str(child) for child in self.children))
        if self.negated:
            return '(NOT (%s))' % body
        return '(%s)' % body

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def __deepcopy__(self, memodict):
        """Utility method used by copy.deepcopy()."""
        clone = Node(connector=self.connector, negated=self.negated)
        clone.__class__ = self.__class__
        clone.children = copy.deepcopy(self.children, memodict)
        return clone

    def __len__(self):
        """The size of a node is the number of children it has."""
        return len(self.children)

    def __bool__(self):
        """A node is truthy exactly when it has children."""
        return len(self.children) > 0

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)

    def __contains__(self, other):
        """Return True if 'other' is a direct child of this instance."""
        return other in self.children

    def add(self, data, conn_type, squash=True):
        """
        Combine this tree with ``data`` using the connector ``conn_type``,
        squashing ``data`` into this node when possible.

        This tree (self) is never pushed to a child node of the combined
        tree, nor do its connector or negated properties change. Returns a
        node that can be used in place of ``data`` regardless of whether it
        was squashed.

        If ``squash`` is False, ``data`` is simply appended as a child.
        """
        if data in self.children:
            return data
        if not squash:
            self.children.append(data)
            return data
        if self.connector != conn_type:
            # Demote the current contents into a child node, then pair that
            # node with ``data`` under the new connector.
            wrapped = self._new_instance(self.children, self.connector,
                                         self.negated)
            self.connector = conn_type
            self.children = [wrapped, data]
            return data
        mergeable = (isinstance(data, Node) and not data.negated and
                     (data.connector == conn_type or len(data) == 1))
        if mergeable:
            # Squash the other node's children directly into this node:
            # (AB)(CD) == (ABCD). A single-child node merges regardless of
            # its connector. The len(self) == 1 case is deliberately not
            # squashed, as that would alter self.connector.
            self.children.extend(data.children)
            return self
        # Further pushdown optimizations are possible but not attempted.
        self.children.append(data)
        return data

    def negate(self):
        """
        Negate the sense of the root connector.
        """
        self.negated = not self.negated
| bsd-3-clause |
webdev1001/ansible | v2/ansible/vars/__init__.py | 11 | 11403 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.vars.hostvars import HostVars
# Process-wide cache of resolved variable dicts, keyed by the
# "PLAY:...;HOST:...;TASK:..." strings built in _get_cache_entry().
CACHED_VARS = dict()
class VariableManager:
    """Resolves the full variable dictionary for a (play, host, task)
    combination, layering sources in Ansible's precedence order.

    NOTE: this module is Python 2 only (``iteritems``, ``except ..., err``).
    """

    def __init__(self):
        self._fact_cache = FactCache()
        self._vars_cache = defaultdict(dict)
        self._extra_vars = defaultdict(dict)
        self._host_vars_files = defaultdict(dict)
        self._group_vars_files = defaultdict(dict)
        self._inventory = None
        # Random, per-manager token representing "omit this parameter".
        self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()

    def _get_cache_entry(self, play=None, host=None, task=None):
        # Build the CACHED_VARS key; "NONE" stands in for absent components.
        play_id = "NONE"
        if play:
            play_id = play._uuid
        host_id = "NONE"
        if host:
            host_id = host.get_name()
        task_id = "NONE"
        if task:
            task_id = task._uuid
        return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)

    @property
    def extra_vars(self):
        ''' ensures a clean copy of the extra_vars are made '''
        return self._extra_vars.copy()

    def set_extra_vars(self, value):
        ''' ensures a clean copy of the extra_vars are used to set the value '''
        assert isinstance(value, dict)
        self._extra_vars = value.copy()

    def set_inventory(self, inventory):
        self._inventory = inventory

    def _combine_vars(self, a, b):
        '''
        Combines dictionaries of variables, based on the hash behavior
        '''
        # FIXME: do we need this from utils, or should it just
        # be merged into this definition?
        #_validate_both_dicts(a, b)
        if C.DEFAULT_HASH_BEHAVIOUR == "merge":
            return self._merge_dicts(a, b)
        else:
            # "replace" behavior: keys from b win wholesale.
            return dict(a.items() + b.items())

    def _merge_dicts(self, a, b):
        '''
        Recursively merges dict b into a, so that keys
        from b take precedence over keys from a.
        '''
        result = dict()
        # FIXME: do we need this from utils, or should it just
        # be merged into this definition?
        #_validate_both_dicts(a, b)
        for dicts in a, b:
            # next, iterate over b keys and values
            for k, v in dicts.iteritems():
                # if there's already such key in a
                # and that key contains dict
                if k in result and isinstance(result[k], dict):
                    # merge those dicts recursively
                    result[k] = self._merge_dicts(a[k], v)
                else:
                    # otherwise, just copy a value from b to a
                    result[k] = v
        return result

    def get_vars(self, loader, play=None, host=None, task=None):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - vars_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - extra vars
        '''
        debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in CACHED_VARS:
            debug("vars are cached, returning them now")
            return CACHED_VARS[cache_entry]
        all_vars = defaultdict(dict)
        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = self._combine_vars(all_vars, role.get_default_vars())
        if host:
            # next, if a host is specified, we load any vars from group_vars
            # files and then any vars from host_vars files which may apply to
            # this host or the groups it belongs to

            # we merge in the special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                all_vars = self._combine_vars(all_vars, self._group_vars_files['all'])
            for group in host.get_groups():
                group_name = group.get_name()
                all_vars = self._combine_vars(all_vars, group.get_vars())
                if group_name in self._group_vars_files and group_name != 'all':
                    all_vars = self._combine_vars(all_vars, self._group_vars_files[group_name])
            host_name = host.get_name()
            if host_name in self._host_vars_files:
                all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name])
            # then we merge in vars specified for this host
            all_vars = self._combine_vars(all_vars, host.get_vars())
            # next comes the facts cache and the vars cache, respectively
            all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.get_name(), dict()))
        if play:
            all_vars = self._combine_vars(all_vars, play.get_vars())
            templar = Templar(loader=loader, variables=all_vars)
            for vars_file in play.get_vars_files():
                try:
                    vars_file = templar.template(vars_file)
                    data = loader.load_from_file(vars_file)
                    all_vars = self._combine_vars(all_vars, data)
                except:
                    # FIXME: get_vars should probably be taking a flag to determine
                    # whether or not vars files errors should be fatal at this
                    # stage, or just base it on whether a host was specified?
                    pass
            for role in play.get_roles():
                all_vars = self._combine_vars(all_vars, role.get_vars())
        if host:
            all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
        if task:
            if task._role:
                all_vars = self._combine_vars(all_vars, task._role.get_vars())
            all_vars = self._combine_vars(all_vars, task.get_vars())
        # extra vars always win.
        all_vars = self._combine_vars(all_vars, self._extra_vars)
        # FIXME: make sure all special vars are here
        # Finally, we create special vars
        if host and self._inventory is not None:
            hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader)
            all_vars['hostvars'] = hostvars
        if self._inventory is not None:
            all_vars['inventory_dir'] = self._inventory.basedir()
        # the 'omit' value alows params to be left out if the variable they are based on is undefined
        all_vars['omit'] = self._omit_token
        CACHED_VARS[cache_entry] = all_vars
        debug("done with get_vars()")
        return all_vars

    def _get_inventory_basename(self, path):
        '''
        Returns the basename minus the extension of the given path, so the
        bare filename can be matched against host/group names later
        '''
        (name, ext) = os.path.splitext(os.path.basename(path))
        # NOTE(review): os.path.splitext returns extensions WITH the leading
        # dot ('.yml'), so this condition is always true and the extension is
        # never actually stripped — likely a bug; verify against callers.
        if ext not in ('yml', 'yaml'):
            return os.path.basename(path)
        else:
            return name

    def _load_inventory_file(self, path, loader):
        '''
        helper function, which loads the file and gets the
        basename of the file without the extension
        '''
        if os.path.isdir(path):
            # Directories are merged recursively into a single dict.
            data = dict()
            try:
                names = os.listdir(path)
            except os.error, err:
                # NOTE(review): AnsibleError is not imported in this module's
                # visible imports — verify.
                raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
            # evaluate files in a stable order rather than whatever
            # order the filesystem lists them.
            names.sort()
            # do not parse hidden files or dirs, e.g. .svn/
            paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
            for p in paths:
                _found, results = self._load_inventory_file(path=p, loader=loader)
                data = self._combine_vars(data, results)
        else:
            data = loader.load_from_file(path)
        name = self._get_inventory_basename(path)
        return (name, data)

    def add_host_vars_file(self, path, loader):
        '''
        Loads and caches a host_vars file in the _host_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory host name
        '''
        if os.path.exists(path):
            (name, data) = self._load_inventory_file(path, loader)
            self._host_vars_files[name] = data

    def add_group_vars_file(self, path, loader):
        '''
        Loads and caches a group_vars file in the _group_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory group name
        '''
        if os.path.exists(path):
            (name, data) = self._load_inventory_file(path, loader)
            self._group_vars_files[name] = data

    def set_host_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''
        assert isinstance(facts, dict)
        host_name = host.get_name()
        if host_name not in self._fact_cache:
            self._fact_cache[host_name] = facts
        else:
            self._fact_cache[host_name].update(facts)

    def set_host_variable(self, host, varname, value):
        '''
        Sets a value in the vars_cache for a host.
        '''
        host_name = host.get_name()
        if host_name not in self._vars_cache:
            self._vars_cache[host_name] = dict()
        self._vars_cache[host_name][varname] = value
| gpl-3.0 |
leandrotoledo/oppia | core/domain/rule_domain.py | 9 | 6544 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes relating to rules."""
__author__ = 'Sean Lip'
import inspect
import os
import pkgutil
from extensions.objects.models import objects
import feconf
import jinja_utils
# TODO(sll): In the frontend, use the rule descriptions as the single source
# of truth for the params.
# Name of the fuzzy-classification rule type (not referenced in this chunk).
FUZZY_RULE_TYPE = 'FuzzyMatches'
def get_obj_type_for_param_name(rule_class, param_name):
    """Gets the obj type for a given param name."""
    # Scan the (name, obj type) pairs parsed from the rule's description.
    for name, obj_type in get_param_list(rule_class.description):
        if name == param_name:
            return obj_type
    raise Exception(
        'Rule %s has no param called %s' % (rule_class.__name__, param_name))
def get_rules_for_obj_type(obj_type):
    """Gets all rules for a given object type.

    Args:
        obj_type: str. The name of the object type.

    Returns:
        A list of rule classes whose direct base class is named
        '<obj_type>Rule'.
    """
    rules_path = os.path.join(os.getcwd(), feconf.RULES_DIR)
    target_base_name = '%sRule' % obj_type
    matching_classes = []
    for module_loader, module_name, _ in pkgutil.iter_modules(path=[rules_path]):
        # Test modules live alongside the rule modules; skip them.
        if module_name.endswith('_test'):
            continue
        module = module_loader.find_module(module_name).load_module(module_name)
        for _, clazz in inspect.getmembers(module, inspect.isclass):
            base_names = [base.__name__ for base in clazz.__bases__]
            if target_base_name in base_names:
                matching_classes.append(clazz)
    return matching_classes
def get_description_strings_for_obj_type(obj_type):
    """Returns a dict whose keys are rule names and whose values are the
    corresponding description strings.
    """
    rule_name_to_description = {}
    for rule_class in get_rules_for_obj_type(obj_type):
        rule_name_to_description[rule_class.__name__] = rule_class.description
    return rule_name_to_description
def get_param_list(description):
    """Get a parameter list from the rule description.

    Each '{{name|Normalizer}}' placeholder yields a (name, normalizer class)
    tuple, where the normalizer is looked up on the ``objects`` module.
    """
    params = []
    remaining = description
    while remaining.find('{{') != -1:
        # Drop everything up to and including the next opening braces.
        remaining = remaining[remaining.find('{{') + 2:]
        bar = remaining.find('|')
        param_name, remaining = remaining[:bar], remaining[bar + 1:]
        close = remaining.find('}}')
        normalizer_name, remaining = remaining[:close], remaining[close + 2:]
        params.append((param_name, getattr(objects, normalizer_name)))
    return params
# Fuzzy truth-value endpoints used by rule evaluations.
CERTAIN_TRUE_VALUE = 1.0
CERTAIN_FALSE_VALUE = 0.0


class Rule(object):
    """Abstract base class for a value object that represents a rule.

    All rules assume that the subject and rule initialization parameters
    are JSONifiable objects (such as primitives, lists, dicts, and
    compositions of these, but NOT sets, tuples, etc.). This is enforced
    by normalizing the subject and rule initialization parameters to
    JSONifiable objects before any evaluations are performed.
    """

    # Object class used to normalize the evaluation subject; subclasses set it.
    subject_type = None

    # Description of the rule, e.g. "is equal to {{x|Int}}". Should be
    # overridden by subclasses.
    description = ''

    # Lazily-derived (name, object class) pairs; see ``params``.
    _PARAMS = None

    # Abstract file system handle attached via set_fs().
    _fs = None

    @property
    def params(self):
        # Parse the description only on first access, then cache the result.
        if self._PARAMS is None:
            self._PARAMS = get_param_list(self.description)
        return self._PARAMS

    def __init__(self, *args):
        expected = self.params
        if len(args) != len(expected):
            raise ValueError(
                'Expected parameters %s, received %s' % (expected, args))
        # Normalize each positional argument and expose it as an attribute
        # named after its parameter.
        for (param_name, obj_cls), raw_value in zip(expected, args):
            setattr(self, param_name, obj_cls.normalize(raw_value))
        self._validate_params()

    def _validate_params(self):
        """Validates the rule object immediately after initialization."""
        pass

    def _evaluate(self, subject):
        """Returns a normalized value between 0 and 1 indicating the truth
        value of the evaluation, where 1.0 is certainly true and 0.0 is
        certainly false. To be implemented by subclasses.
        """
        raise NotImplementedError

    def _fuzzify_truth_value(self, bool_value):
        """Returns a fuzzy truth value for a crisp true or false value: 1.0
        for truthy input, 0.0 for falsy input.
        """
        if bool_value:
            return CERTAIN_TRUE_VALUE
        return CERTAIN_FALSE_VALUE

    def _invert_fuzzy_truth_value(self, fuzzy_value):
        """Performs a NOT operation on a fuzzy value."""
        return CERTAIN_TRUE_VALUE - fuzzy_value

    def set_fs(self, fs):
        """Set an abstract file system to use with this rule."""
        self._fs = fs
        return self

    @property
    def fs(self):
        return self._fs

    def eval(self, subject):
        """Public evaluation method.

        Args:
            subject: the thing to be evaluated.

        Returns:
            the result of evaluating the normalized subject.
        """
        normalized_subject = self.subject_type.normalize(subject)
        return self._evaluate(normalized_subject)
def evaluate_rule(rule_spec, answer_type, context_params, answer, fs):
    """Evaluates a rule spec using context_params. Returns a boolean."""
    # Locate the rule class whose name matches the spec's rule type.
    rule_cls = next(
        cls for cls in get_rules_for_obj_type(answer_type)
        if cls.__name__ == rule_spec.rule_type)

    # Build the positional constructor arguments, expanding any '{{...}}'
    # Jinja templates against the evaluation context first.
    rule_args = []
    for (param_name, param_obj_cls) in get_param_list(rule_cls.description):
        raw_value = rule_spec.inputs[param_name]
        if isinstance(raw_value, basestring) and '{{' in raw_value:
            raw_value = jinja_utils.parse_string(
                raw_value, context_params, autoescape=False)
        rule_args.append(param_obj_cls.normalize(raw_value))

    # set_fs() returns the rule itself, so the calls can be chained.
    return rule_cls(*rule_args).set_fs(fs).eval(answer)
| apache-2.0 |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/numpy/lib/tests/test_recfunctions.py | 8 | 28717 | from __future__ import division, absolute_import, print_function
import sys
import numpy as np
import numpy.ma as ma
from numpy.ma.testutils import *
from numpy.ma.mrecords import MaskedRecords
from numpy.lib.recfunctions import *
# Re-export a few helpers from np.lib.recfunctions so the tests below can
# refer to them by their bare names.
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(TestCase):
    """
    Misc tests
    """
    #
    def setUp(self):
        # Fixtures: x/y are plain int arrays of different lengths, z is a
        # flexible (structured) array, and w has a nested structured dtype.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)],
                     dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_zip_descr(self):
        "Test zip_descr"
        (w, x, y, z) = self.data
        # Std array
        test = zip_descr((x, x), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, x), flatten=False)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        # Std & flexible-dtype
        test = zip_descr((x, z), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('A', '|S3'), ('B', float)]))
        test = zip_descr((x, z), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('A', '|S3'), ('B', float)])]))
        # Standard & nested dtype
        test = zip_descr((x, w), flatten=True)
        assert_equal(test,
                     np.dtype([('', int),
                               ('a', int),
                               ('ba', float), ('bb', int)]))
        test = zip_descr((x, w), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('a', int),
                                     ('b', [('ba', float), ('bb', int)])])]))

    def test_drop_fields(self):
        "Test drop_fields"
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # A basic field
        test = drop_fields(a, 'a')
        control = np.array([((2, 3.0),), ((5, 6.0),)],
                           dtype=[('b', [('ba', float), ('bb', int)])])
        assert_equal(test, control)
        # Another basic field (but nesting two fields)
        test = drop_fields(a, 'b')
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # A nested sub-field
        test = drop_fields(a, ['ba', ])
        control = np.array([(1, (3.0,)), (4, (6.0,))],
                           dtype=[('a', int), ('b', [('bb', int)])])
        assert_equal(test, control)
        # All the nested sub-field from a field: zap that field
        test = drop_fields(a, ['ba', 'bb'])
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # Dropping every field yields None in this numpy version.
        test = drop_fields(a, ['a', 'b'])
        assert_(test is None)

    def test_rename_fields(self):
        "Tests rename fields"
        a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
                     dtype=[('a', int),
                            ('b', [('ba', float), ('bb', (float, 2))])])
        # Renaming must reach nested fields ('bb') as well as top-level ones.
        test = rename_fields(a, {'a':'A', 'bb':'BB'})
        newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
        control = a.view(newdtype)
        assert_equal(test.dtype, newdtype)
        assert_equal(test, control)

    def test_get_names(self):
        "Tests get_names"
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names(ndtype)
        assert_equal(test, ('A', 'B'))
        # Nested fields are reported as (parent, (children...)) tuples.
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names(ndtype)
        assert_equal(test, ('a', ('b', ('ba', 'bb'))))

    def test_get_names_flat(self):
        "Test get_names_flat"
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names_flat(ndtype)
        assert_equal(test, ('A', 'B'))
        # The flat variant returns parents and children in one flat tuple.
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names_flat(ndtype)
        assert_equal(test, ('a', 'b', 'ba', 'bb'))

    def test_get_fieldstructure(self):
        "Test get_fieldstructure"
        # No nested fields
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A':[], 'B':[]})
        # One 1-nested field
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': [], 'BA':['B', ], 'BB':['B']})
        # One 2-nested fields: each leaf maps to its full parent chain.
        ndtype = np.dtype([('A', int),
                           ('B', [('BA', int),
                                  ('BB', [('BBA', int), ('BBB', int)])])])
        test = get_fieldstructure(ndtype)
        control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
                   'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
        assert_equal(test, control)

    def test_find_duplicates(self):
        "Test find_duplicates"
        a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
                      (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
                     mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
                           (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
                     dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        # Whole-record duplicates, masked entries included.
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 2]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        # Duplicates on a single top-level field.
        test = find_duplicates(a, key='A', return_index=True)
        control = [0, 1, 2, 3, 5]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        # Duplicates on a nested (composite) field.
        test = find_duplicates(a, key='B', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        # Duplicates on each nested sub-field.
        test = find_duplicates(a, key='BA', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        #
        test = find_duplicates(a, key='BB', return_index=True)
        control = [0, 1, 2, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])

    def test_find_duplicates_ignoremask(self):
        "Test the ignoremask option of find_duplicates"
        ndtype = [('a', int)]
        a = ma.array([1, 1, 1, 2, 2, 3, 3],
                     mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
        # With ignoremask=True the masked entries (indices 2 and 6) are not
        # counted as duplicates of their unmasked values.
        test = find_duplicates(a, ignoremask=True, return_index=True)
        control = [0, 1, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        #
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 1, 2, 3, 4, 6]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
class TestRecursiveFillFields(TestCase):
    """
    Test recursive_fill_fields.
    """

    def test_simple_flexible(self):
        "Test recursive_fill_fields on flexible-array"
        # Copy a 2-row structured source into a 3-row zeroed destination:
        # the trailing destination row must keep its zeros.
        source = np.array([(1, 10.), (2, 20.)],
                          dtype=[('A', int), ('B', float)])
        dest = np.zeros((3,), dtype=source.dtype)
        filled = recursive_fill_fields(source, dest)
        expected = np.array([(1, 10.), (2, 20.), (0, 0.)],
                            dtype=[('A', int), ('B', float)])
        assert_equal(filled, expected)

    def test_masked_flexible(self):
        "Test recursive_fill_fields on masked flexible-array"
        # Same as above with masked input: the source mask must be carried
        # over, and the extra destination row stays unmasked.
        source = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                          dtype=[('A', int), ('B', float)])
        dest = ma.zeros((3,), dtype=source.dtype)
        filled = recursive_fill_fields(source, dest)
        expected = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                            mask=[(0, 1), (1, 0), (0, 0)],
                            dtype=[('A', int), ('B', float)])
        assert_equal(filled, expected)
class TestMergeArrays(TestCase):
    """
    Test merge_arrays
    """
    def setUp(self):
        # Fixtures: x/y are plain int arrays of different lengths, z is a
        # flexible (structured) array, and w has a nested structured dtype.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_solo(self):
        "Test merge_arrays on a single array."
        (_, x, _, z) = self.data
        # A bare unnamed array gets a default 'f0' field name.
        test = merge_arrays(x)
        control = np.array([(1,), (2,)], dtype=[('f0', int)])
        assert_equal(test, control)
        test = merge_arrays((x,))
        assert_equal(test, control)
        # A flexible array is returned unchanged, flattened or not.
        test = merge_arrays(z, flatten=False)
        assert_equal(test, z)
        test = merge_arrays(z, flatten=True)
        assert_equal(test, z)

    def test_solo_w_flatten(self):
        "Test merge_arrays on a single array w & w/o flattening"
        w = self.data[0]
        test = merge_arrays(w, flatten=False)
        assert_equal(test, w)
        # Flattening lifts the nested 'b' sub-fields to the top level.
        test = merge_arrays(w, flatten=True)
        control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
                           dtype=[('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)

    def test_standard(self):
        "Test standard & standard"
        # Test merge arrays: the shorter input is padded with the fill value
        # (-1) and, with usemask=True, masked in the padded slots.
        (_, x, y, _) = self.data
        test = merge_arrays((x, y), usemask=False)
        control = np.array([(1, 10), (2, 20), (-1, 30)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        #
        test = merge_arrays((x, y), usemask=True)
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_flatten(self):
        "Test standard & flexible"
        (_, x, _, z) = self.data
        test = merge_arrays((x, z), flatten=True)
        control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        # Without flattening, z stays a nested field named 'f1'.
        test = merge_arrays((x, z), flatten=False)
        control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
                           dtype=[('f0', int),
                                  ('f1', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)

    def test_flatten_wflexible(self):
        "Test flatten standard & nested"
        (w, x, _, _) = self.data
        test = merge_arrays((x, w), flatten=True)
        control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
                           dtype=[('f0', int),
                                  ('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
        #
        test = merge_arrays((x, w), flatten=False)
        # BUG fix: the original bound the dtype through a stray
        # `controldtype = dtype = ...` double assignment and never compared
        # `test` against `control`.
        controldtype = [('f0', int),
                        ('f1', [('a', int),
                                ('b', [('ba', float), ('bb', int)])])]
        control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
                           dtype=controldtype)
        assert_equal(test, control)

    def test_wmasked_arrays(self):
        "Test merge_arrays masked arrays"
        (_, x, _, _) = self.data
        mx = ma.array([1, 2, 3], mask=[1, 0, 0])
        test = merge_arrays((x, mx), usemask=True)
        control = ma.array([(1, 1), (2, 2), (-1, 3)],
                           mask=[(0, 1), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        # asrecarray=True must return a MaskedRecords instance.
        test = merge_arrays((x, mx), usemask=True, asrecarray=True)
        assert_equal(test, control)
        assert_(isinstance(test, MaskedRecords))

    def test_w_singlefield(self):
        "Test single field"
        test = merge_arrays((np.array([1, 2]).view([('a', int)]),
                             np.array([10., 20., 30.])),)
        control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('a', int), ('f1', float)])
        assert_equal(test, control)

    def test_w_shorter_flex(self):
        "Test merge_arrays w/ a shorter flexndarray."
        z = self.data[-1]
        test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        # FIXME(review): this test never asserted anything, and the expected
        # `control` below looks inconsistent — the padded third row pairs
        # ('-1', -1) with C == 20 although the appended C column's third
        # value is 30. Left unasserted pending verification of the intended
        # result.
        control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                           dtype=[('A', '|S3'), ('B', float), ('C', int)])

    def test_singlerecord(self):
        # Merging scalar records (0-d inputs) yields a one-row result.
        (_, x, y, z) = self.data
        test = merge_arrays((x[0], y[0], z[0]), usemask=False)
        control = np.array([(1, 10, ('A', 1))],
                           dtype=[('f0', int),
                                  ('f1', int),
                                  ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
class TestAppendFields(TestCase):
    """
    Test append_fields
    """
    def setUp(self):
        # Fixtures: x/y are plain int arrays, z is a flexible (structured)
        # array, and w has a nested structured dtype.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    #
    def test_append_single(self):
        "Test simple case"
        # Appending a longer column pads (and masks) the original rows.
        (_, x, _, _) = self.data
        test = append_fields(x, 'A', data=[10, 20, 30])
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('A', int)],)
        assert_equal(test, control)
    #
    def test_append_double(self):
        "Test simple case"
        # Two new fields of different lengths: each is padded independently.
        (_, x, _, _) = self.data
        test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
        control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                           dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(test, control)
    #
    def test_append_on_flex(self):
        "Test append_fields on flexible type arrays"
        z = self.data[-1]
        test = append_fields(z, 'C', data=[10, 20, 30])
        control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(test, control)
    #
    def test_append_on_nested(self):
        "Test append_fields on nested fields"
        # The nested 'b' field must survive the append, with its padded row
        # fully masked.
        w = self.data[0]
        test = append_fields(w, 'C', data=[10, 20, 30])
        control = ma.array([(1, (2, 3.0), 10),
                            (4, (5, 6.0), 20),
                            (-1, (-1, -1.), 30)],
                           mask=[(0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
                           dtype=[('a', int),
                                  ('b', [('ba', float), ('bb', int)]),
                                  ('C', int)],)
        assert_equal(test, control)
class TestStackArrays(TestCase):
    """
    Test stack_arrays
    """
    def setUp(self):
        # Fixtures: x/y are plain int arrays, z is a flexible (structured)
        # array, and w has a nested structured dtype.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_solo(self):
        "Test stack_arrays on single arrays"
        # A single input must be returned as-is (same object, not a copy).
        (_, x, _, _) = self.data
        test = stack_arrays((x,))
        assert_equal(test, x)
        self.assertTrue(test is x)
        #
        test = stack_arrays(x)
        assert_equal(test, x)
        self.assertTrue(test is x)

    def test_unnamed_fields(self):
        "Tests combinations of arrays w/o named fields"
        # Unnamed (plain) arrays are simply concatenated, in order.
        (_, x, y, _) = self.data
        #
        test = stack_arrays((x, x), usemask=False)
        control = np.array([1, 2, 1, 2])
        assert_equal(test, control)
        #
        test = stack_arrays((x, y), usemask=False)
        control = np.array([1, 2, 10, 20, 30])
        assert_equal(test, control)
        #
        test = stack_arrays((y, x), usemask=False)
        control = np.array([10, 20, 30, 1, 2])
        assert_equal(test, control)

    def test_unnamed_and_named_fields(self):
        "Test combination of arrays w/ & w/o named fields"
        # Fields missing from one input are masked in the stacked result;
        # field order follows the order of the inputs.
        (_, x, _, z) = self.data
        #
        test = stack_arrays((x, z))
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        #
        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        #
        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)

    def test_matching_named_fields(self):
        "Test combination of arrays w/ matching field names"
        # Fields with the same name in both inputs are merged into one
        # column rather than duplicated.
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        #
        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_defaults(self):
        "Test defaults: no exception raised if keys of defaults are not fields."
        # The extra 'D' default key must be ignored; 'C' fills the missing
        # column of z with -9999. instead of the standard fill value.
        (_, _, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        defaults = {'A':'???', 'B':-999., 'C':-9999., 'D':-99999.}
        test = stack_arrays((z, zz), defaults=defaults)
        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                            ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_autoconversion(self):
        "Tests autoconversion"
        adtype = [('A', int), ('B', bool), ('C', float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [('A', int), ('B', float), ('C', float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        test = stack_arrays((a, b), autoconvert=True)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Without autoconversion the incompatible field dtypes ('B' is bool
        # vs. float) must raise. Use assertRaises instead of the original
        # manual try/except/else-raise construction.
        self.assertRaises(TypeError, stack_arrays, (a, b), autoconvert=False)

    def test_checktitles(self):
        "Test using titles in the field names"
        # (title, name) field tuples must stack like plain named fields.
        adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        test = stack_arrays((a, b))
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
class TestJoinBy(TestCase):
    """Tests for join_by on partially-overlapping structured arrays."""

    def setUp(self):
        # a: keys a=0..9, b=50..59; b: keys a=5..14, b=65..74. The 'a'
        # columns overlap on 5..9, the 'b' columns never overlap.
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])

    def test_inner_join(self):
        "Basic test of join_by"
        a, b = self.a, self.b
        # The colliding non-key field 'b' is split into 'b1'/'b2'.
        test = join_by('a', a, b, jointype='inner')
        control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
                            (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
                            (9, 59, 69, 109, 104)],
                           dtype=[('a', int), ('b1', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_join(self):
        a, b = self.a, self.b
        #
        test = join_by(('a', 'b'), a, b)
        # FIXME(review): this test never asserted anything, and the expected
        # `control` below cannot be right: `a` and `b` share no common
        # ('a', 'b') key pairs (b's 'b' values run 65-74 vs. a's 50-59), so
        # an inner join on both keys is empty. Left unasserted pending a
        # real fix of the expected result.
        control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
                            (7, 57, 107, 102), (8, 58, 108, 103),
                            (9, 59, 109, 104)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])

    def test_outer_join(self):
        a, b = self.a, self.b
        # An outer join keeps every ('a', 'b') pair from both sides and
        # masks the column the pair is missing from.
        test = join_by(('a', 'b'), a, b, 'outer')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (5, 65, -1, 100), (6, 56, 106, -1),
                            (6, 66, -1, 101), (7, 57, 107, -1),
                            (7, 67, -1, 102), (8, 58, 108, -1),
                            (8, 68, -1, 103), (9, 59, 109, -1),
                            (9, 69, -1, 104), (10, 70, -1, 105),
                            (11, 71, -1, 106), (12, 72, -1, 107),
                            (13, 73, -1, 108), (14, 74, -1, 109)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_leftouter_join(self):
        a, b = self.a, self.b
        # A left-outer join keeps every row of `a`, masking 'd' everywhere
        # (no ('a', 'b') pair of `a` exists in `b`).
        test = join_by(('a', 'b'), a, b, 'leftouter')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (6, 56, 106, -1), (7, 57, 107, -1),
                            (8, 58, 108, -1), (9, 59, 109, -1)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1)],
                           dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
        # BUG fix: the original built `control` but never asserted.
        assert_equal(test, control)
class TestJoinBy2(TestCase):
    """Tests for the r1postfix/r2postfix options of join_by."""

    def setUp(self):
        # BUG fix: this was previously a @classmethod (still named setUp),
        # which only worked by accident because unittest invokes it through
        # the instance. A plain instance-level setUp is what unittest
        # expects; the tests only ever read self.a / self.b.
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])

    def test_no_r1postfix(self):
        "Basic test of join_by no_r1postfix"
        a, b = self.a, self.b
        # With an empty r1postfix, the colliding 'b' field keeps its name
        # on the left side and only the right side is renamed ('b2').
        test = join_by('a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_no_postfix(self):
        # Two empty postfixes cannot disambiguate the colliding 'b' field.
        self.assertRaises(ValueError, join_by, 'a', self.a, self.b,
                          r1postfix='', r2postfix='')

    def test_no_r2postfix(self):
        "Basic test of join_by no_r2postfix"
        a, b = self.a, self.b
        # Mirror case: only the left side's colliding field is renamed.
        test = join_by('a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_two_keys_two_vars(self):
        # Join on two keys ('a', 'k') with two colliding non-key fields
        # ('b', 'c'), both of which must be postfixed.
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
# Allow running this test module directly, outside the full test runner.
if __name__ == '__main__':
    run_module_suite()
| gpl-3.0 |
minlexx/pyevemon | esi_client/models/get_characters_character_id_mail_mail_id_internal_server_error.py | 1 | 3191 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdMailMailIdInternalServerError(object):
    """Swagger model for the mail endpoint's 500 error payload.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, error=None):
        """
        GetCharactersCharacterIdMailMailIdInternalServerError - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> swagger type name.
        self.swagger_types = {
            'error': 'str'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'error': 'error'
        }
        self._error = error

    @property
    def error(self):
        """
        Gets the error of this GetCharactersCharacterIdMailMailIdInternalServerError.
        Internal server error message

        :return: The error of this GetCharactersCharacterIdMailMailIdInternalServerError.
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """
        Sets the error of this GetCharactersCharacterIdMailMailIdInternalServerError.
        Internal server error message

        :param error: The error of this GetCharactersCharacterIdMailMailIdInternalServerError.
        :type: str
        """
        self._error = error

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Walk the declared swagger attributes, recursing into any nested
        # model objects (anything exposing a to_dict() of its own).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return (isinstance(
            other, GetCharactersCharacterIdMailMailIdInternalServerError)
            and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| gpl-3.0 |
openstack/manila | manila/tests/api/views/test_share_network_subnets.py | 1 | 2945 | # Copyright 2019 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from manila.api.views import share_network_subnets
from manila import test
from manila.tests.api import fakes
from manila.tests import db_utils
@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the share network subnet API view builder."""

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = share_network_subnets.ViewBuilder()
        self.share_network = db_utils.create_share_network(
            name='fake_network', id='fake_sn_id')

    def _validate_is_detail_return(self, result):
        # Helper: assert that `result` carries every key of the detailed
        # subnet view.
        expected_keys = ['id', 'created_at', 'updated_at', 'neutron_net_id',
                         'neutron_subnet_id', 'network_type', 'cidr',
                         'segmentation_id', 'ip_version', 'share_network_id',
                         'availability_zone', 'gateway', 'mtu']
        for key in expected_keys:
            self.assertIn(key, result)

    def test_build_share_network_subnet(self):
        # Single-subnet view: wrapped under 'share_network_subnet' and fully
        # detailed.
        req = fakes.HTTPRequest.blank('/subnets', version='2.51')
        subnet = db_utils.create_share_network_subnet(
            share_network_id=self.share_network['id'])

        result = self.builder.build_share_network_subnet(req, subnet)

        self.assertEqual(1, len(result))
        self.assertIn('share_network_subnet', result)
        self.assertEqual(subnet['id'],
                         result['share_network_subnet']['id'])
        self.assertEqual(subnet['share_network_id'],
                         result['share_network_subnet']['share_network_id'])
        # No AZ was given at creation time, so the view must report None.
        self.assertIsNone(
            result['share_network_subnet']['availability_zone'])
        self._validate_is_detail_return(result['share_network_subnet'])

    def test_build_share_network_subnets(self):
        # List view: wrapped under 'share_network_subnets', one detailed
        # entry per subnet.
        req = fakes.HTTPRequest.blank('/subnets', version='2.51')
        share_network = db_utils.create_share_network(
            name='fake_network', id='fake_sn_id_1')
        subnet = db_utils.create_share_network_subnet(
            share_network_id=share_network['id'])

        result = self.builder.build_share_network_subnets(req, [subnet])

        self.assertIn('share_network_subnets', result)
        self.assertEqual(1, len(result['share_network_subnets']))
        subnet_list = result['share_network_subnets']
        for subnet in subnet_list:
            self._validate_is_detail_return(subnet)
| apache-2.0 |
don-github/edx-platform | openedx/core/djangoapps/content/course_structures/api/v0/serializers.py | 65 | 1313 | """
API Serializers
"""
from rest_framework import serializers
class GradingPolicySerializer(serializers.Serializer):
    """ Serializer for course grading policy. """
    # Expose the model's 'type'/'min_count'/'drop_count' attributes under
    # friendlier names in the serialized output.
    assignment_type = serializers.CharField(source='type')
    count = serializers.IntegerField(source='min_count')
    dropped = serializers.IntegerField(source='drop_count')
    weight = serializers.FloatField()
# pylint: disable=invalid-name
class BlockSerializer(serializers.Serializer):
    """ Serializer for course structure block. """
    # 'id' and 'type' shadow builtins, hence the pylint disable above.
    id = serializers.CharField(source='usage_key')
    type = serializers.CharField(source='block_type')
    # NOTE(review): source='parent' is redundant (same as the field name);
    # newer DRF versions reject this — confirm the DRF version in use.
    parent = serializers.CharField(source='parent')
    display_name = serializers.CharField()
    graded = serializers.BooleanField(default=False)
    format = serializers.CharField()
    children = serializers.CharField()
class CourseStructureSerializer(serializers.Serializer):
    """ Serializer for course structure. """
    # NOTE(review): source='root' is redundant (same as the field name);
    # newer DRF versions reject this — confirm the DRF version in use.
    root = serializers.CharField(source='root')
    # NOTE(review): passing the method name to SerializerMethodField is the
    # pre-DRF-3 convention; DRF 3 infers 'get_blocks' and rejects the
    # explicit redundant name.
    blocks = serializers.SerializerMethodField('get_blocks')

    def get_blocks(self, structure):
        """ Serialize the individual blocks. """
        serialized = {}
        # iteritems: this module targets Python 2.
        for key, block in structure['blocks'].iteritems():
            serialized[key] = BlockSerializer(block).data
        return serialized
| agpl-3.0 |
ShiYw/Sigil | 3rdparty/python/Lib/test/test_importlib/frozen/test_finder.py | 81 | 2209 | from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import unittest
class FindSpecTests(abc.FinderTests):

    """Test finding frozen modules."""

    def find(self, name, path=None):
        # self.machinery is injected per-variant by util.test_both (see the
        # module-level call below).
        finder = self.machinery.FrozenImporter
        return finder.find_spec(name, path)

    def test_module(self):
        # A plain frozen module reports the 'frozen' origin.
        name = '__hello__'
        spec = self.find(name)
        self.assertEqual(spec.origin, 'frozen')

    def test_package(self):
        spec = self.find('__phello__')
        self.assertIsNotNone(spec)

    def test_module_in_package(self):
        spec = self.find('__phello__.spam', ['__phello__'])
        self.assertIsNotNone(spec)

    # No frozen package within another package to test with.
    test_package_in_package = None

    # No easy way to test.
    test_package_over_module = None

    def test_failure(self):
        # Unknown names must yield no spec rather than raising.
        spec = self.find('<not real>')
        self.assertIsNone(spec)
# Generate the frozen- and source-machinery variants of the test class.
Frozen_FindSpecTests, Source_FindSpecTests = util.test_both(FindSpecTests,
        machinery=machinery)
class FinderTests(abc.FinderTests):

    """Test finding frozen modules."""

    def find(self, name, path=None):
        # Legacy find_module() API counterpart of FindSpecTests above.
        finder = self.machinery.FrozenImporter
        return finder.find_module(name, path)

    def test_module(self):
        name = '__hello__'
        loader = self.find(name)
        self.assertTrue(hasattr(loader, 'load_module'))

    def test_package(self):
        loader = self.find('__phello__')
        self.assertTrue(hasattr(loader, 'load_module'))

    def test_module_in_package(self):
        loader = self.find('__phello__.spam', ['__phello__'])
        self.assertTrue(hasattr(loader, 'load_module'))

    # No frozen package within another package to test with.
    test_package_in_package = None

    # No easy way to test.
    test_package_over_module = None

    def test_failure(self):
        # Unknown names must yield no loader rather than raising.
        loader = self.find('<not real>')
        self.assertIsNone(loader)
# Instantiate the template against both the frozen and the source variants of
# importlib (see test.test_importlib.util.test_both).
Frozen_FinderTests, Source_FinderTests = util.test_both(FinderTests,
                                                        machinery=machinery)


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
ppiotr/Bibedit-some-refactoring | modules/webmessage/lib/webmessage_config.py | 4 | 1821 | # -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
webmessage config file, here you can manage error messages, size of messages,
quotas, and some db related fields...
"""
__revision__ = "$Id$"
# error messages. (should not happen, except in case of reload, or url
# altering)
CFG_WEBMESSAGE_ERROR_MESSAGES = \
{ 'ERR_WEBMESSAGE_NOTOWNER': '_("This message is not in your mailbox")',
'ERR_WEBMESSAGE_NONICKNAME':'_("No nickname or user for uid #%s")',
'ERR_WEBMESSAGE_NOMESSAGE': '_("This message doesn\'t exist")'
}
# status of message (table user_msgMESSAGE)
CFG_WEBMESSAGE_STATUS_CODE = \
{
'NEW': 'N',
'READ': 'R',
'REMINDER': 'M'
}
# values indicating which results field to display while writing a message
CFG_WEBMESSAGE_RESULTS_FIELD = \
{
'USER': 'user',
'GROUP': 'group',
'NONE': 'none'
}
# separator used in every list of recipients
CFG_WEBMESSAGE_SEPARATOR = ','
# list of roles (find them in accROLE table) without quota
CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA = ['superadmin']
| gpl-2.0 |
Jenselme/servo | tests/wpt/web-platform-tests/tools/html5lib/utils/spider.py | 436 | 4157 | #!/usr/bin/env python
"""Spider to try and find bugs in the parser. Requires httplib2 and elementtree
usage:
import spider
s = spider.Spider()
s.spider("http://www.google.com", maxURLs=100)
"""
import urllib.request, urllib.error, urllib.parse
import urllib.robotparser
import md5
import httplib2
import html5lib
from html5lib.treebuilders import etree
class Spider(object):
    """Crawl pages starting from a seed URL, recording every URL whose
    markup makes the html5lib parser raise ("buggy" pages)."""

    def __init__(self):
        self.unvisitedURLs = set()
        self.visitedURLs = set()
        self.buggyURLs = set()
        self.robotParser = urllib.robotparser.RobotFileParser()
        # digest -> first URL seen with that exact content; used to skip
        # duplicate pages served under several URLs.
        self.contentDigest = {}
        self.http = httplib2.Http(".cache")

    def run(self, initialURL, maxURLs=1000):
        """Crawl from initialURL until maxURLs pages have been parsed
        (or until the frontier is empty; forever if maxURLs is None)."""
        urlNumber = 0
        self.visitedURLs.add(initialURL)
        content = self.loadURL(initialURL)
        while maxURLs is None or urlNumber < maxURLs:
            if content is not None:
                self.parse(content)
                urlNumber += 1
            if not self.unvisitedURLs:
                break
            content = self.loadURL(self.unvisitedURLs.pop())

    def parse(self, content):
        """Parse one document; record the current URL as buggy if the
        parser raises, otherwise harvest its links."""
        failed = False
        p = html5lib.HTMLParser(tree=etree.TreeBuilder)
        try:
            tree = p.parse(content)
        except Exception:
            # Any parser crash is exactly what we are hunting for. Catching
            # Exception rather than a bare `except:` still lets
            # KeyboardInterrupt/SystemExit propagate.
            self.buggyURLs.add(self.currentURL)
            failed = True
            print("BUGGY:", self.currentURL)
        self.visitedURLs.add(self.currentURL)
        if not failed:
            self.updateURLs(tree)

    def loadURL(self, url):
        """Fetch url and return its body, or None for duplicate content or
        non-200 responses."""
        # Local import: the module-level `import md5` is Python 2 only and
        # this file otherwise targets Python 3 (urllib.request etc.).
        import hashlib
        resp, content = self.http.request(url, "GET")
        self.currentURL = url
        digest = hashlib.md5(content).hexdigest()
        if digest in self.contentDigest:
            # Same bytes already seen under another URL: skip re-parsing.
            content = None
            self.visitedURLs.add(url)
        else:
            self.contentDigest[digest] = url
        if resp['status'] != "200":
            content = None
        return content

    def updateURLs(self, tree):
        """Take all the links in the current document, extract the URLs and
        update the list of visited and unvisited URLs according to whether we
        have seen them before or not"""
        urls = set()
        # Remove all links we have already visited
        for link in tree.findall(".//a"):
            try:
                url = urllib.parse.urldefrag(link.attrib['href'])[0]
                if (url and url not in self.unvisitedURLs and url
                        not in self.visitedURLs):
                    urls.add(url)
            except KeyError:
                # <a> element without an href attribute.
                pass
        # Remove all non-http URLs and add a suitable base URL where that is
        # missing.
        newUrls = set()
        for url in urls:
            splitURL = list(urllib.parse.urlsplit(url))
            if splitURL[0] != "http":
                continue
            if splitURL[1] == "":
                splitURL[1] = urllib.parse.urlsplit(self.currentURL)[1]
            newUrls.add(urllib.parse.urlunsplit(splitURL))
        urls = newUrls

        responseHeaders = {}
        # Now we want to find the content types of the links we haven't
        # visited.
        for url in urls:
            try:
                resp, content = self.http.request(url, "HEAD")
                responseHeaders[url] = resp
            except (AttributeError, KeyError):
                # BUG FIX: the original `except AttributeError as KeyError:`
                # merely *bound* the AttributeError to the name KeyError
                # instead of catching either exception type.
                # Don't know why this happens
                pass
        # Remove links not of content-type html or pages not found
        # XXX - need to deal with other status codes?
        toVisit = set([url for url in urls if url in responseHeaders and
                       "html" in responseHeaders[url]['content-type'] and
                       responseHeaders[url]['status'] == "200"])
        # Now check we are allowed to spider the page. Collect the disallowed
        # URLs first instead of mutating toVisit while iterating over it,
        # which raises RuntimeError on Python 3.
        disallowed = set()
        for url in toVisit:
            robotURL = list(urllib.parse.urlsplit(url)[:2])
            robotURL.extend(["robots.txt", "", ""])
            robotURL = urllib.parse.urlunsplit(robotURL)
            self.robotParser.set_url(robotURL)
            if not self.robotParser.can_fetch("*", url):
                disallowed.add(url)
        toVisit -= disallowed
        self.visitedURLs.update(urls)
        self.unvisitedURLs.update(toVisit)
| mpl-2.0 |
cancan101/tensorflow | tensorflow/python/kernel_tests/zero_division_test.py | 139 | 2389 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):

  def testZeros(self):
    """Integer //0 and %0 must either raise a clean OpError (CPU) or
    produce an all-bits-set result (GPU)."""
    with self.test_session(use_gpu=True):
      for dtype in (dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64):
        numerator = constant_op.constant(1, dtype=dtype)
        denominator = constant_op.constant(0, dtype=dtype)
        # Floor division exists for every integer dtype; the modulo kernel
        # is only registered for 32/64-bit ints.
        bad_ops = [numerator // denominator]
        if dtype in (dtypes.int32, dtypes.int64):
          bad_ops.append(numerator % denominator)
        for bad in bad_ops:
          try:
            result = bad.eval()
          except errors_impl.OpError as e:
            # Ideally, we'd get a nice exception. In theory, this should only
            # happen on CPU, but 32 bit integer GPU division is actually on
            # CPU due to a placer bug.
            # TODO(irving): Make stricter once the placer bug is fixed.
            self.assertIn('Integer division by zero', str(e))
          else:
            # On the GPU, integer division by zero produces all bits set.
            # But apparently on some GPUs "all bits set" for 64 bit division
            # means 32 bits set, so we allow 0xffffffff as well. This isn't
            # very portable, so we may need to expand this list if other GPUs
            # do different things.
            self.assertTrue(test.is_gpu_available())
            self.assertIn(result, (-1, 0xff, 0xffffffff))
# Allow running this test file directly, outside the bazel test runner.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
tpodowd/boto | tests/unit/ec2/test_instance.py | 114 | 11050 | #!/usr/bin/env python
import base64
from tests.compat import unittest, mock
from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
DESCRIBE_INSTANCE_VPC = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
<requestId>c6132c74-b524-4884-87f5-0f4bde4a9760</requestId>
<reservationSet>
<item>
<reservationId>r-72ef4a0a</reservationId>
<ownerId>184906166255</ownerId>
<groupSet/>
<instancesSet>
<item>
<instanceId>i-instance</instanceId>
<imageId>ami-1624987f</imageId>
<instanceState>
<code>16</code>
<name>running</name>
</instanceState>
<privateDnsName/>
<dnsName/>
<reason/>
<keyName>mykeypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>m1.small</instanceType>
<launchTime>2012-12-14T23:48:37.000Z</launchTime>
<placement>
<availabilityZone>us-east-1d</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<kernelId>aki-88aa75e1</kernelId>
<monitoring>
<state>disabled</state>
</monitoring>
<subnetId>subnet-0dc60667</subnetId>
<vpcId>vpc-id</vpcId>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<architecture>x86_64</architecture>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/sda1</deviceName>
<ebs>
<volumeId>vol-id</volumeId>
<status>attached</status>
<attachTime>2012-12-14T23:48:43.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</ebs>
</item>
</blockDeviceMapping>
<virtualizationType>paravirtual</virtualizationType>
<clientToken>foo</clientToken>
<tagSet>
<item>
<key>Name</key>
<value/>
</item>
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-id</networkInterfaceId>
<subnetId>subnet-id</subnetId>
<vpcId>vpc-id</vpcId>
<description>Primary network interface</description>
<ownerId>ownerid</ownerId>
<status>in-use</status>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-id</attachmentId>
<deviceIndex>0</deviceIndex>
<status>attached</status>
<attachTime>2012-12-14T23:48:37.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<privateIpAddressesSet>
<item>
<privateIpAddress>10.0.0.67</privateIpAddress>
<primary>true</primary>
</item>
<item>
<privateIpAddress>10.0.0.54</privateIpAddress>
<primary>false</primary>
</item>
<item>
<privateIpAddress>10.0.0.55</privateIpAddress>
<primary>false</primary>
</item>
</privateIpAddressesSet>
</item>
</networkInterfaceSet>
<ebsOptimized>false</ebsOptimized>
</item>
</instancesSet>
</item>
</reservationSet>
</DescribeInstancesResponse>
"""
RUN_INSTANCE_RESPONSE = br"""
<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2012-06-01/">
<requestId>ad4b83c2-f606-4c39-90c6-5dcc5be823e1</requestId>
<reservationId>r-c5cef7a7</reservationId>
<ownerId>ownerid</ownerId>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>SSH</groupName>
</item>
</groupSet>
<instancesSet>
<item>
<instanceId>i-ff0f1299</instanceId>
<imageId>ami-ed65ba84</imageId>
<instanceState>
<code>0</code>
<name>pending</name>
</instanceState>
<privateDnsName/>
<dnsName/>
<reason/>
<keyName>awskeypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>t1.micro</instanceType>
<launchTime>2012-05-30T19:21:18.000Z</launchTime>
<placement>
<availabilityZone>us-east-1a</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<kernelId>aki-b6aa75df</kernelId>
<monitoring>
<state>disabled</state>
</monitoring>
<groupSet>
<item>
<groupId>sg-99a710f1</groupId>
<groupName>SSH</groupName>
</item>
</groupSet>
<stateReason>
<code>pending</code>
<message>pending</message>
</stateReason>
<architecture>i386</architecture>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping/>
<virtualizationType>paravirtual</virtualizationType>
<clientToken/>
<hypervisor>xen</hypervisor>
<networkInterfaceSet/>
<iamInstanceProfile>
<arn>arn:aws:iam::ownerid:instance-profile/myinstanceprofile</arn>
<id>iamid</id>
</iamInstanceProfile>
</item>
</instancesSet>
</RunInstancesResponse>
"""
class TestRunInstanceResponseParsing(unittest.TestCase):

    def testIAMInstanceProfileParsedCorrectly(self):
        """run_instances() must keep the EC2 instance id, not the
        iamInstanceProfile id, in instance.id."""
        conn = EC2Connection(aws_access_key_id='aws_access_key_id',
                             aws_secret_access_key='aws_secret_access_key')
        # Stub the HTTP layer so run_instances() parses our canned XML.
        fake_response = mock.Mock()
        fake_response.read.return_value = RUN_INSTANCE_RESPONSE
        fake_response.status = 200
        conn.make_request = mock.Mock(return_value=fake_response)
        reservation = conn.run_instances(image_id='ami-12345')
        self.assertEqual(len(reservation.instances), 1)
        instance = reservation.instances[0]
        self.assertEqual(instance.image_id, 'ami-ed65ba84')
        # iamInstanceProfile has an ID element, so we want to make sure
        # that this does not map to instance.id (which should be the
        # id of the ec2 instance).
        self.assertEqual(instance.id, 'i-ff0f1299')
        expected_profile = {
            'arn': ('arn:aws:iam::ownerid:'
                    'instance-profile/myinstanceprofile'),
            'id': 'iamid',
        }
        self.assertDictEqual(instance.instance_profile, expected_profile)
class TestRunInstances(AWSMockServiceTestCase):
    connection_class = EC2Connection

    def default_body(self):
        # This is a dummy response
        return b"""
        <DescribeLaunchConfigurationsResponse>
        </DescribeLaunchConfigurationsResponse>
        """

    def test_run_instances_user_data(self):
        """user_data must be sent base64-encoded in the request params."""
        self.set_http_response(status_code=200)
        self.service_connection.run_instances(
            image_id='123456',
            instance_type='m1.large',
            security_groups=['group1', 'group2'],
            user_data='#!/bin/bash'
        )
        expected_params = {
            'Action': 'RunInstances',
            'ImageId': '123456',
            'InstanceType': 'm1.large',
            'UserData': base64.b64encode(b'#!/bin/bash').decode('utf-8'),
            'MaxCount': 1,
            'MinCount': 1,
            'SecurityGroup.1': 'group1',
            'SecurityGroup.2': 'group2',
        }
        self.assert_request_parameters(
            expected_params,
            ignore_params_values=[
                'Version', 'AWSAccessKeyId', 'SignatureMethod',
                'SignatureVersion', 'Timestamp'
            ])
class TestDescribeInstances(AWSMockServiceTestCase):
    connection_class = EC2Connection

    def default_body(self):
        return DESCRIBE_INSTANCE_VPC

    def test_multiple_private_ip_addresses(self):
        """All three privateIpAddressesSet entries must be parsed, with only
        the first marked primary."""
        self.set_http_response(status_code=200)
        reservations = self.service_connection.get_all_reservations()
        self.assertEqual(len(reservations), 1)
        instances = reservations[0].instances
        self.assertEqual(len(instances), 1)
        interfaces = instances[0].interfaces
        self.assertEqual(len(interfaces), 1)
        addresses = interfaces[0].private_ip_addresses
        self.assertEqual(len(addresses), 3)
        expected = [('10.0.0.67', True),
                    ('10.0.0.54', False),
                    ('10.0.0.55', False)]
        for address, (ip, is_primary) in zip(addresses, expected):
            self.assertEqual(address.private_ip_address, ip)
            if is_primary:
                self.assertTrue(address.primary)
            else:
                self.assertFalse(address.primary)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| mit |
UCSBarchlab/PyRTL | examples/example8-verilog.py | 1 | 5323 | """ Example 8: Interfacing with Verilog.
While there is much more about PyRTL design to discuss, at some point somebody
might ask you to do something with your code other than have it print
pretty things out to the terminal. We provide import from and export to
Verilog of designs, export of waveforms to VCD, and a set of transforms
that make doing netlist-level transforms and analysis directly in PyRTL easy.
"""
import random
import io
import pyrtl
# ---- Importing From Verilog ----
# Sometimes it is useful to pull in components written in Verilog to be used
# as subcomponents of PyRTL designs or to be subject to analysis written over
# the PyRTL core. One standard format supported by PyRTL is "blif" format:
# https://www.ece.cmu.edu/~ee760/760docs/blif.pdf
# Many tools supoprt outputting hardware designs to this format, including the
# free open source project "Yosys". Blif files can then be imported either
# as a string or directly from a file name by the function input_from_blif.
# Here is a simple example of a 1-bit full adder imported and then simulated
# from this blif format.
full_adder_blif = """
# Generated by Yosys 0.3.0+ (git sha1 7e758d5, clang 3.4-1ubuntu3 -fPIC -Os)
.model full_adder
.inputs x y cin
.outputs sum cout
.names $false
.names $true
1
.names y $not$FA.v:12$3_Y
0 1
.names x $not$FA.v:11$1_Y
0 1
.names cin $not$FA.v:15$6_Y
0 1
.names ind3 ind4 sum
1- 1
-1 1
.names $not$FA.v:15$6_Y ind2 ind3
11 1
.names x $not$FA.v:12$3_Y ind1
11 1
.names ind2 $not$FA.v:16$8_Y
0 1
.names cin $not$FA.v:16$8_Y ind4
11 1
.names x y $and$FA.v:19$11_Y
11 1
.names ind0 ind1 ind2
1- 1
-1 1
.names cin ind2 $and$FA.v:19$12_Y
11 1
.names $and$FA.v:19$11_Y $and$FA.v:19$12_Y cout
1- 1
-1 1
.names $not$FA.v:11$1_Y y ind0
11 1
.end
"""
# Load the blif netlist above into the current working block.
pyrtl.input_from_blif(full_adder_blif)
# Have to find the actual wire vectors generated from the names in the blif file
# (shown for demonstration; the simulation below addresses inputs by name).
x, y, cin = [pyrtl.working_block().get_wirevector_by_name(s) for s in ['x', 'y', 'cin']]
io_vectors = pyrtl.working_block().wirevector_subset((pyrtl.Input, pyrtl.Output))

# We are only going to trace the input and output vectors for clarity
sim_trace = pyrtl.SimulationTrace(wires_to_track=io_vectors)

# Now simulate the logic with some random inputs
sim = pyrtl.Simulation(tracer=sim_trace)
for i in range(15):
    # here we actually generate random booleans for the inputs
    sim.step({
        'x': random.choice([0, 1]),
        'y': random.choice([0, 1]),
        'cin': random.choice([0, 1])
    })
sim_trace.render_trace(symbol_len=5, segment_size=5)
# ---- Exporting to Verilog ----
# However, not only do we want to have a method to import from Verilog, we also
# want a way to export it back out to Verilog as well. To demonstrate PyRTL's
# ability to export in Verilog, we will create a sample 3-bit counter. However
# unlike the example in example2, we extend it to be synchronously resetting.
pyrtl.reset_working_block()
zero = pyrtl.Input(1, 'zero')
counter_output = pyrtl.Output(3, 'counter_output')
counter = pyrtl.Register(3, 'counter')
counter.next <<= pyrtl.mux(zero, counter + 1, 0)
counter_output <<= counter
# The counter gets 0 in the next cycle if the "zero" signal goes high, otherwise just
# counter + 1. Note that both "0" and "1" are bit extended to the proper length and
# here we are making use of that native add operation. Let's dump this bad boy out
# to a Verilog file and see what is looks like (here we are using StringIO just to
# print it to a string for demo purposes; most likely you will want to pass a normal
# open file).
print("--- PyRTL Representation ---")
print(pyrtl.working_block())
print()
print("--- Verilog for the Counter ---")
with io.StringIO() as vfile:
pyrtl.output_to_verilog(vfile)
print(vfile.getvalue())
print("--- Simulation Results ---")
sim_trace = pyrtl.SimulationTrace([counter_output, zero])
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(15):
sim.step({'zero': random.choice([0, 0, 0, 1])})
sim_trace.render_trace()
# We already did the "hard" work of generating a test input for this simulation, so
# we might want to reuse that work when we take this design through a Verilog toolchain.
# The class OutputVerilogTestbench grabs the inputs used in the simulation trace
# and sets them up in a standard verilog testbench.
print("--- Verilog for the TestBench ---")
with io.StringIO() as tbfile:
pyrtl.output_verilog_testbench(dest_file=tbfile, simulation_trace=sim_trace)
print(tbfile.getvalue())
# Now let's talk about transformations of the hardware block. Many times when you are
# doing some hardware-level analysis you might wish to ignore higher level things like
# multi-bit wirevectors, adds, concatenation, etc. and just think about wires and basic
# gates. PyRTL supports "lowering" of designs into this more restricted set of functionality
# though the function "synthesize". Once we lower a design to this form we can then apply
# basic optimizations like constant propagation and dead wire elimination as well. By
# printing it out to Verilog we can see exactly how the design changed.
print("--- Optimized Single-bit Verilog for the Counter ---")
pyrtl.synthesize()
pyrtl.optimize()
with io.StringIO() as vfile:
pyrtl.output_to_verilog(vfile)
print(vfile.getvalue())
| bsd-3-clause |
cmusatyalab/elijah-discovery-basic | elijah/discovery/monitor/file_cache.py | 2 | 7495 | #!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import threading
from multiprocessing import Queue
import multiprocessing
import time
import sys
from fuse import FUSE, FuseOSError, Operations
from optparse import OptionParser
from Queue import Empty
from fusecache import LoopBack
from fusecache import AccessInfo
from ..config import DiscoveryConst as DiscoveryConst
from ..log import logging
LOG = logging.getLogger(__name__)
_cache_monitor_instance = None
_fuse_instance = None
def get_instance():
    """Return the singleton cache monitor, creating and starting the FUSE
    loop-back mount and the monitor thread on first use.

    NOTE(review): this lazy initialization is not guarded by a lock, so
    concurrent first calls could start two FUSE mounts — confirm callers are
    single-threaded during startup.
    """
    global _cache_monitor_instance
    global _fuse_instance
    if _cache_monitor_instance is None:
        LOG.info("[CACHE] FUSE mount at %s, which is loop back of %s" % \
                 (DiscoveryConst.CLOUDLET_FS_ROOT, DiscoveryConst.DFS_ROOT))
        # The FUSE child process reports every file access through this queue.
        access_queue = Queue()
        _fuse_instance = FuseLauncher(DiscoveryConst.CLOUDLET_FS_ROOT,\
                                      DiscoveryConst.DFS_ROOT, access_queue)
        _fuse_instance.start()
        LOG.info("[CACHE] start Cache monitoring")
        _cache_monitor_instance = _CacheMonitor(access_queue,\
                                                DiscoveryConst.DFS_ROOT, print_out=False)
        _cache_monitor_instance.start()
    return _cache_monitor_instance
def terminate():
    """Tear down the module singletons: stop the monitor thread and the FUSE
    loop-back process. Safe to call more than once (idempotent)."""
    global _cache_monitor_instance
    global _fuse_instance
    if _cache_monitor_instance is not None:
        _cache_monitor_instance.terminate()
        _cache_monitor_instance = None
    if _fuse_instance is not None:
        _fuse_instance.terminate()
        _fuse_instance = None
class CacheMonitorError(Exception):
    """Raised when the cache monitor cannot be set up (e.g. the DFS root or
    the loop-back mount point is missing or not accessible)."""
    pass
class _CacheMonitor(threading.Thread):
def __init__(self, access_queue, dfs_root, print_out=False):
self.access_queue = access_queue
self.dfs_root = dfs_root
self.print_out = print_out
self.stop = threading.Event()
self.cache_info_dict = dict() # inode:cache_status
threading.Thread.__init__(self, target=self.process)
def process(self):
while (self.stop.wait(0.01) is False):
try:
access = self.access_queue.get_nowait()
if access.cmd == AccessInfo.CMD_READ or \
access.cmd == AccessInfo.CMD_WRITE:
self.cache_info_dict[access.inode] = access
if self.print_out is True:
print access
except Empty:
continue
def cached_files(self):
file_list = list()
for (inode, access) in self.cache_info_dict.iteritems():
relpath = os.path.relpath(access.full_path, self.dfs_root)
file_list.append(relpath)
file_list.sort()
return file_list
def check_file(self, filename, is_abspath=False):
if is_abspath is True:
abspath = filename
else:
abspath = os.path.join(self.dfs_root, filename)
if os.path.exists(abspath) is False:
return False
else:
inode = os.stat(abspath).st_ino
access_info = self.cache_info_dict.get(inode, None)
if access_info is not None:
return True
else:
return False
def terminate(self):
LOG.info("get signal")
self.stop.set()
class CmdInterface(threading.Thread):
    """Minimal interactive console for the monitor: 'list' prints the cached
    files, 'q' quits, empty input is ignored.

    Uses Python 2 I/O (raw_input / print statement), matching the rest of
    this module.
    """
    def __init__(self, cache_monitor):
        self.cache_monitor = cache_monitor
        threading.Thread.__init__(self, target=self.run)

    def run(self):
        # Blocks on user input; the thread ends when the user types 'q'.
        while True:
            user_input = raw_input("> ").lower().strip()
            if user_input == "list":
                print "\n".join(self.cache_monitor.cached_files())
            elif len(user_input) == 0:
                continue
            elif user_input == 'q':
                break
            else:
                print "Invalid command"

    def terminate(self):
        # Nothing to clean up; run() exits on its own when 'q' is entered.
        pass
class FuseLauncher(multiprocessing.Process):
    """Child process that mounts a FUSE loop-back of `root` at `mountpoint`,
    forwarding every file access into `access_queue` as AccessInfo records."""

    def __init__(self, mountpoint, root, access_queue):
        self.stop = threading.Event()
        self.mountpoint = mountpoint
        self.root = root
        self.access_queue = access_queue
        # Both directories must exist and be readable/writable before the
        # mount is attempted; fail early with an actionable message.
        if os.path.isdir(self.root) is False or\
                os.access(self.root, os.R_OK | os.W_OK) is False:
            msg = "Failed to setup cache monitoring at %s\n" % self.root
            msg += "Please create a directory for the distributed file system at %s\n" %\
                    self.root
            msg += "Or you can change path to the directory at\n"
            msg += "elijah-discovery/elijah/discovery/Const.py, DFS_ROOT variable"
            raise CacheMonitorError(msg)
        if os.path.isdir(self.mountpoint) is False or\
                os.access(self.mountpoint, os.R_OK | os.W_OK) is False:
            msg = "Failed to setup cache monitoring at %s\n" % self.mountpoint
            msg += "Please create a directory for the loopback at %s\n" %\
                    self.mountpoint
            msg += "Or you can change path to the directory at\n"
            msg += "elijah-discovery/elijah/discovery/Const.py, "
            msg += "CLOUDLET_FS_ROOT variable"
            raise CacheMonitorError(msg)
        multiprocessing.Process.__init__(self)

    def run(self):
        # Runs in the child process and blocks until the mount is torn down.
        FUSE(LoopBack(self.root, self.access_queue), self.mountpoint, foreground=True)

    def terminate(self):
        # NOTE(review): this overrides multiprocessing.Process.terminate()
        # and only sets a threading.Event in the calling process; run()
        # never checks the event, so the FUSE child keeps running. Confirm
        # whether Process.terminate() (SIGTERM) was intended instead.
        self.stop.set()
def process_command_line(argv):
    """Parse command-line arguments.

    :param argv: argument list excluding the program name (sys.argv[1:])
    :return: (mount_point, loopback_path, settings) tuple; `settings` carries
             the `print_console` flag from -v/--verbose.
    Calls parser.error() (which exits) unless exactly two positional
    arguments are given.
    """
    VERSION = '0.1'
    DESCRIPTION = 'Cache monitor'

    parser = OptionParser(usage='%prog [mount_point] [root] [options]',
                          version=VERSION, description=DESCRIPTION)
    parser.add_option(
        '-v', '--verbose', action='store_true', dest='print_console', default=False,
        help="print out access info in realtime")
    settings, args = parser.parse_args(argv)
    # BUG FIX: `len(args) is not 2` compared object identity of two ints,
    # which only happens to work because of CPython's small-int cache; use
    # the value comparison `!=` instead.
    if len(args) != 2:
        parser.error("Need mount point and root path")

    mount_point = args[0]
    lookback_path = args[1]
    return mount_point, lookback_path, settings
def main():
mountpoint, root, settings = process_command_line(sys.argv[1:])
access_queue = Queue()
fuse = FuseLauncher(mountpoint, root, access_queue)
cache_monitor = _CacheMonitor(access_queue, settings.print_console)
cmdline_interface = None
if not settings.print_console:
cmdline_interface = CmdInterface(cache_monitor)
cmdline_interface.start()
fuse.start()
cache_monitor.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt as e:
print "User interrupt"
ret_code = 1
finally:
if cache_monitor is not None:
cache_monitor.terminate()
if fuse is not None:
fuse.terminate()
if cmdline_interface is not None:
cmdline_interface.terminate()
if __name__ == '__main__':
main()
| apache-2.0 |
Xeralux/tensorflow | tensorflow/python/framework/smart_cond.py | 1 | 4733 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""smart_cond and related utilties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
  """Return either `true_fn()` if predicate `pred` is true else `false_fn()`.

  If `pred` is a bool or has a constant value, we return either `true_fn()`
  or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

  Arguments:
    pred: A scalar determining whether to return the result of `true_fn` or
      `false_fn`.
    true_fn: The callable to be performed if pred is true.
    false_fn: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `true_fn` or `false_fn`.

  Raises:
    TypeError: If `true_fn` or `false_fn` is not callable.
  """
  if not callable(true_fn):
    raise TypeError("`true_fn` must be callable.")
  if not callable(false_fn):
    raise TypeError("`false_fn` must be callable.")

  static_pred = smart_constant_value(pred)
  if static_pred is None:
    # Predicate is only known at run time: fall back to a real tf.cond.
    return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
                                 name=name)
  # Predicate resolved at graph-construction time: call one branch directly.
  return true_fn() if static_pred else false_fn()
def smart_constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or tensor.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: If `pred` is not a Tensor or bool.
  """
  if pred in {0, 1}:  # Accept 1/0 as valid boolean values
    # Note: Python bools also satisfy this test (True == 1, False == 0),
    # so they are normalized here before the isinstance check below.
    pred_value = bool(pred)
  elif isinstance(pred, bool):
    pred_value = pred
  elif isinstance(pred, ops.Tensor):
    pred_value = tensor_util.constant_value(pred)
    # TODO(skyewm): consider folding this into tensor_util.constant_value when
    # _USE_C_API is removed (there may be performance and correctness bugs, so I
    # wanted to limit the change hidden behind _USE_C_API).
    # pylint: disable=protected-access
    if pred_value is None and ops._USE_C_API:
      # Ask the C API to constant-fold the tensor as a last resort.
      with errors.raise_exception_on_not_ok_status() as status:
        pred_value = c_api.TF_TryEvaluateConstant_wrapper(
            pred.graph._c_graph, pred._as_tf_output(), status)
    # pylint: enable=protected-access

  else:
    raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
                    "Found instead: %s" % pred)
  return pred_value
def smart_case(pred_fn_pairs, default=None, exclusive=False, name="smart_case"):
  """Like tf.case, except attempts to statically evaluate predicates.

  If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
  associated callable will be called or omitted depending on its value.
  Otherwise this functions like tf.case.

  Args:
    pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
      callable which returns a list of tensors.
    default: Optional callable that returns a list of tensors.
    exclusive: True iff at most one predicate is allowed to evaluate to `True`.
    name: A name for this operation (optional).

  Returns:
    The tensors returned by the first pair whose predicate evaluated to True, or
    those returned by `default` if none does.

  Raises:
    TypeError: If `pred_fn_pairs` is not a list/dictionary.
    TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
    TypeError: If `fns[i]` is not callable for any i, or `default` is not
      callable.
  """
  # Delegate to the shared case helper, substituting smart_cond for tf.cond so
  # statically-known predicates are resolved at graph-construction time.
  return control_flow_ops._case_helper(  # pylint: disable=protected-access
      smart_cond, pred_fn_pairs, default, exclusive, name,
      allow_python_preds=True)
| apache-2.0 |
Aldriana/ShadowCraft-Engine | shadowcraft/objects/talents.py | 1 | 2840 | from shadowcraft.core import exceptions
from shadowcraft.objects import talents_data
class InvalidTalentException(exceptions.InvalidInputException):
    """Raised when a talent string is malformed or a talent name is unknown."""
    pass
class Talents(object):
    """Holds a character's talent selection.

    The selection is parsed from a compact string with one digit per tier
    (e.g. '310021'); each chosen talent becomes a True attribute on the
    instance, while every other known talent reads as False via __getattr__.
    """

    def __init__(self, talent_string, game_class='rogue', level='90'):
        # NOTE(review): `level` defaults to the *string* '90' while
        # get_top_tier() compares it against ints; that ordering only behaves
        # under Python 2 mixed-type comparison — confirm callers pass an int.
        self.game_class = game_class
        self.class_talents = talents_data.talents[game_class]
        self.level = level
        # Flat list of every talent name available to this class, all tiers.
        self.allowed_talents = [talent for tier in self.class_talents for talent in tier]
        self.allowed_talents_for_level = self.get_allowed_talents_for_level()
        self.initialize_talents(talent_string)

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)

    def __getattr__(self, name):
        # If someone tries to access a talent not initialized (the talent
        # string was shorter than 6) we return False
        if name in self.allowed_talents:
            return False
        # Non-talent names fall through so __getattribute__ raises the
        # normal AttributeError.
        object.__getattribute__(self, name)

    def get_allowed_talents_for_level(self):
        """Return the talent names in every tier unlocked at self.level."""
        allowed_talents_for_level = []
        for i in xrange(self.get_top_tier()):
            for talent in self.class_talents[i]:
                allowed_talents_for_level.append(talent)
        return allowed_talents_for_level

    def is_allowed_talent(self, name, check_level=False):
        """True if `name` is a valid talent (optionally: unlocked at level)."""
        if check_level:
            return name in self.allowed_talents_for_level
        else:
            return name in self.allowed_talents

    def get_top_tier(self):
        """Return how many talent tiers are unlocked (one per 15 levels)."""
        levels = (15, 30, 45, 60, 75, 90)
        top_tier = 0
        for i in levels:
            if self.level >= i:
                top_tier += 1
        return top_tier

    def initialize_talents(self, talent_string):
        """Set one talent attribute per tier from the compact digit string."""
        if len(talent_string) > 6:
            raise InvalidTalentException(_('Talent strings must be 6 or less characters long'))
        j = 0
        for i in talent_string:
            # NOTE(review): int(i) raises ValueError for '.', so the
            # `i == '.'` branch below looks unreachable — confirm whether
            # '.' placeholders were meant to be accepted here.
            if int(i) not in range(4):
                raise InvalidTalentException(_('Values in the talent string must be 0, 1, 2, 3, or sometimes 4'))
            if int(i) == 0 or i == '.':
                pass
            else:
                # Digit k selects talent k-1 (0-based) within tier j.
                setattr(self, self.class_talents[j][int(i) - 1], True)
            j += 1

    def reset_talents(self):
        """Unset every talent known for this class."""
        for talent in self.allowed_talents:
            setattr(self, talent, False)

    def get_tier_for_talent(self, name):
        """Return the 0-based tier index containing `name`, or None."""
        if name not in self.allowed_talents:
            return None
        tier = 0
        for i in xrange(6):
            if name in self.class_talents[i]:
                return i

    def set_talent(self, name):
        # Clears talents in the tier and sets the new one
        if name not in self.allowed_talents:
            return False
        for talent in self.class_talents[self.get_tier_for_talent(name)]:
            setattr(self, talent, False)
        setattr(self, name, True)
| lgpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/cloudstack/cs_router.py | 37 | 10470 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_router
short_description: Manages routers on Apache CloudStack based clouds.
description:
- Start, restart, stop and destroy routers.
- C(state=present) is not able to create routers, use M(cs_network) instead.
version_added: "2.2"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the router.
required: true
service_offering:
description:
- Name or id of the service offering of the router.
domain:
description:
- Domain the router is related to.
account:
description:
- Account the router is related to.
project:
description:
- Name of the project the router is related to.
zone:
description:
- Name of the zone the router is deployed in.
- If not set, all zones are used.
version_added: "2.4"
state:
description:
- State of the router.
default: 'present'
choices: [ 'present', 'absent', 'started', 'stopped', 'restarted' ]
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure the router has the desired service offering, no matter if
# the router is running or not.
- local_action:
module: cs_router
name: r-40-VM
service_offering: System Offering for Software Router
# Ensure started
- local_action:
module: cs_router
name: r-40-VM
state: started
# Ensure started with desired service offering.
# If the service offerings changes, router will be rebooted.
- local_action:
module: cs_router
name: r-40-VM
service_offering: System Offering for Software Router
state: started
# Ensure stopped
- local_action:
module: cs_router
name: r-40-VM
state: stopped
# Remove a router
- local_action:
module: cs_router
name: r-40-VM
state: absent
'''
RETURN = '''
---
id:
description: UUID of the router.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the router.
returned: success
type: string
sample: r-40-VM
created:
description: Date of the router was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
template_version:
description: Version of the system VM template.
returned: success
type: string
sample: 4.5.1
requires_upgrade:
description: Whether the router needs to be upgraded to the new template.
returned: success
type: bool
sample: false
redundant_state:
description: Redundant state of the router.
returned: success
type: string
sample: UNKNOWN
role:
description: Role of the router.
returned: success
type: string
sample: VIRTUAL_ROUTER
zone:
description: Name of zone the router is in.
returned: success
type: string
sample: ch-gva-2
service_offering:
description: Name of the service offering the router has.
returned: success
type: string
sample: System Offering For Software Router
state:
description: State of the router.
returned: success
type: string
sample: Active
domain:
description: Domain the router is related to.
returned: success
type: string
sample: ROOT
account:
description: Account the router is related to.
returned: success
type: string
sample: admin
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackRouter(AnsibleCloudStack):
    """CloudStack virtual router management.

    Routers cannot be created through this module (use cs_network);
    this class looks up an existing router and starts / stops / reboots
    / destroys it, or changes its service offering.
    """

    def __init__(self, module):
        super(AnsibleCloudStackRouter, self).__init__(module)
        # Maps CloudStack API response keys to the module's return keys.
        self.returns = {
            'serviceofferingname': 'service_offering',
            'version': 'template_version',
            'requiresupgrade': 'requires_upgrade',
            'redundantstate': 'redundant_state',
            'role': 'role'
        }
        # Cache for get_router() so the API is only queried once.
        self.router = None

    def get_service_offering_id(self):
        """Resolve the configured service offering name/id to an id.

        Returns None when no offering was requested; fails the module
        when the offering is not found among the system offerings.
        """
        service_offering = self.module.params.get('service_offering')
        if not service_offering:
            return None
        args = {
            'issystem': True
        }
        service_offerings = self.query_api('listServiceOfferings', **args)
        if service_offerings:
            for s in service_offerings['serviceoffering']:
                if service_offering in [s['name'], s['id']]:
                    return s['id']
        self.module.fail_json(msg="Service offering '%s' not found" % service_offering)

    def get_router(self):
        """Look up the router by name (or id), caching the result."""
        if not self.router:
            router = self.module.params.get('name')
            args = {
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'listall': True,
                'fetch_list': True,
            }
            if self.module.params.get('zone'):
                args['zoneid'] = self.get_zone(key='id')
            routers = self.query_api('listRouters', **args)
            if routers:
                for r in routers:
                    if router.lower() in [r['name'].lower(), r['id']]:
                        self.router = r
                        break
        return self.router

    def _power_router(self, command, skip_state=None):
        """Run a power API command against the router.

        command: CloudStack API command, e.g. 'startRouter'.
        skip_state: when given and the router is already in this state
            (case-insensitive) the command is skipped.

        Shared implementation for start/stop/reboot, which previously
        were three near-identical copies.  Honors check mode and
        optionally polls the async job to completion.
        """
        router = self.get_router()
        if not router:
            self.module.fail_json(msg="Router not found")

        if skip_state and router['state'].lower() == skip_state:
            return router

        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.query_api(command, id=router['id'])

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                router = self.poll_job(res, 'router')
        return router

    def start_router(self):
        """Start the router unless it is already running."""
        return self._power_router('startRouter', skip_state='running')

    def stop_router(self):
        """Stop the router unless it is already stopped."""
        return self._power_router('stopRouter', skip_state='stopped')

    def reboot_router(self):
        """Reboot the router unconditionally."""
        return self._power_router('rebootRouter')

    def absent_router(self):
        """Destroy the router if it exists."""
        router = self.get_router()
        if router:
            self.result['changed'] = True

            args = {
                'id': router['id'],
            }

            if not self.module.check_mode:
                res = self.query_api('destroyRouter', **args)

                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'router')
        return router

    def present_router(self):
        """Ensure the router has the desired service offering and state."""
        router = self.get_router()
        if not router:
            self.module.fail_json(msg="Router can not be created using the API, see cs_network.")

        args = {
            'id': router['id'],
            'serviceofferingid': self.get_service_offering_id(),
        }

        state = self.module.params.get('state')

        if self.has_changed(args, router):
            self.result['changed'] = True
            if not self.module.check_mode:
                # The service offering can only be changed while the router
                # is stopped; remember the state so it can be restored.
                current_state = router['state'].lower()

                self.stop_router()
                router = self.query_api('changeServiceForRouter', **args)

                if state in ['restarted', 'started']:
                    router = self.start_router()

                # if state=present we get to the state before the service
                # offering change.
                elif state == "present" and current_state == "running":
                    router = self.start_router()

        elif state == "started":
            router = self.start_router()

        elif state == "stopped":
            router = self.stop_router()

        elif state == "restarted":
            router = self.reboot_router()

        return router
def main():
    """Module entry point: build the argument spec, dispatch on state."""
    spec = cs_argument_spec()
    spec.update(dict(
        name=dict(required=True),
        service_offering=dict(),
        state=dict(choices=['present', 'started', 'stopped', 'restarted', 'absent'], default="present"),
        domain=dict(),
        account=dict(),
        project=dict(),
        zone=dict(),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_router = AnsibleCloudStackRouter(module)

    # 'absent' destroys the router; every other state is handled by
    # present_router(), which also reconciles started/stopped/restarted.
    if module.params.get('state') == 'absent':
        router = acs_router.absent_router()
    else:
        router = acs_router.present_router()

    result = acs_router.get_result(router)
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
GustavoHennig/ansible | lib/ansible/modules/cloud/azure/azure.py | 5 | 24573 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When created optionally waits for it to be 'running'.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of type G and DS are not available in all regions (locations). Make sure if you selected the size and type of instance available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
- The type of the os that is gettings provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
- name: Provision virtual machine example
azure:
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: East US
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: True
state: present
delegate_to: localhost
- name: Terminate virtual machine example
azure:
name: my-virtual-machine
state: absent
delegate_to: localhost
- name: Create windows machine
azure:
name: ben-Winows-23
hostname: win123
os_type: windows
enable_winrm: True
subscription_id: '{{ azure_sub_id }}'
management_cert_path: '{{ azure_cert_path }}'
role_size: Small
image: bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5
location: East Asia
password: xxx
storage_account: benooytes
user: admin
wait: True
state: present
virtual_network_name: '{{ vnet_name }}'
delegate_to: localhost
'''
import base64
import datetime
import os
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_D1_v2',
'Standard_D2_v2',
'Standard_D3_v2',
'Standard_D4_v2',
'Standard_D5_v2',
'Standard_D11_v2',
'Standard_D12_v2',
'Standard_D13_v2',
'Standard_D14_v2',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException as e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
    """
    Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
    """
    # openssl prints e.g. "SHA1 Fingerprint=88:60:0B:...:FF"; slice off the
    # 17-character prefix and drop the colon separators to get the digest.
    fingerprint_cmd = ['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout']
    rc, stdout, stderr = module.run_command(fingerprint_cmd)
    if rc != 0:
        module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
    fingerprint = stdout.strip()[17:].replace(':', '')

    # Export the certificate as an (empty-password) PKCS12 blob and
    # base64-encode it for the service-certificate upload API.
    pkcs12_cmd = ['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:']
    rc, stdout, stderr = module.run_command(pkcs12_cmd)
    if rc != 0:
        module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
    pkcs12_base64 = base64.b64encode(stdout.strip())

    return (fingerprint, pkcs12_base64)
def create_virtual_machine(module, azure):
    """
    Create new virtual machine

    module : AnsibleModule object
    azure: authenticated azure ServiceManagementService object

    Creates the hosted cloud service if its name is still available, then
    creates the VM role inside it if no role of that name exists yet.
    Either step failing calls module.fail_json().

    Returns:
        (changed, public_dns_name, deployment) where changed is True if a
        new virtual machine and/or cloud service was created
    """
    name = module.params.get('name')
    os_type = module.params.get('os_type')
    hostname = module.params.get('hostname') or name + ".cloudapp.net"
    endpoints = module.params.get('endpoints').split(',')
    ssh_cert_path = module.params.get('ssh_cert_path')
    user = module.params.get('user')
    password = module.params.get('password')
    location = module.params.get('location')
    role_size = module.params.get('role_size')
    storage_account = module.params.get('storage_account')
    image = module.params.get('image')
    virtual_network_name = module.params.get('virtual_network_name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False

    # Check if a deployment with the same name already exists
    cloud_service_name_available = azure.check_hosted_service_name_availability(name)
    if cloud_service_name_available.result:
        # cloud service does not exist; create it
        try:
            result = azure.create_hosted_service(service_name=name, label=name, location=location)
            _wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
            changed = True
        except AzureException as e:
            module.fail_json(msg="failed to create the new service, error was: %s" % str(e))

    try:
        # check to see if a vm with this name exists; if so, do nothing
        azure.get_role(name, name, name)
    except AzureMissingException:
        # vm does not exist; create it
        if os_type == 'linux':
            # Create linux configuration.  Password auth is disabled when
            # no password was supplied (an ssh cert is expected instead).
            disable_ssh_password_authentication = not password
            vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
        else:
            #Create Windows Config
            vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
            vm_config.domain_join = None
            if module.params.get('enable_winrm'):
                # Plain HTTP WinRM listener; management happens over the
                # cloud service endpoints.
                listener = Listener('Http')
                vm_config.win_rm.listeners.listeners.append(listener)
            else:
                vm_config.win_rm = None

        # Add ssh certificates if specified
        if ssh_cert_path:
            fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
            # Add certificate to cloud service
            result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
            _wait_for_completion(azure, result, wait_timeout, "add_service_certificate")

            # Create ssh config pointing at the uploaded certificate by
            # its fingerprint, installed into the user's authorized_keys.
            ssh_config = SSH()
            ssh_config.public_keys = PublicKeys()
            authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
            ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
            # Append ssh config to linux machine config
            vm_config.ssh = ssh_config

        # Create network configuration: one TCP input endpoint per
        # requested port, mapping the public port to the same local port.
        network_config = ConfigurationSetInputEndpoints()
        network_config.configuration_set_type = 'NetworkConfiguration'
        network_config.subnet_names = []
        network_config.public_ips = None
        for port in endpoints:
            network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
                                                                                protocol='TCP',
                                                                                port=port,
                                                                                local_port=port))

        # First determine where to store disk
        today = datetime.date.today().strftime('%Y-%m-%d')
        disk_prefix = u'%s-%s' % (name, name)
        media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
        # Create system hard disk
        os_hd = OSVirtualHardDisk(image, media_link)

        # Spin up virtual machine
        try:
            result = azure.create_virtual_machine_deployment(service_name=name,
                                                             deployment_name=name,
                                                             deployment_slot='production',
                                                             label=name,
                                                             role_name=name,
                                                             system_config=vm_config,
                                                             network_config=network_config,
                                                             os_virtual_hard_disk=os_hd,
                                                             role_size=role_size,
                                                             role_type='PersistentVMRole',
                                                             virtual_network_name=virtual_network_name)
            _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
            changed = True
        except AzureException as e:
            module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))

    try:
        # Look the deployment back up to report its public DNS name.
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
        return (changed, urlparse(deployment.url).hostname, deployment)
    except AzureException as e:
        module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
    """
    Terminates a virtual machine

    module : AnsibleModule object
    azure: authenticated azure ServiceManagementService object

    Deletes the deployment, then its now-detached disks, then the hosted
    cloud service itself; a missing deployment is treated as already
    terminated.

    Returns:
        (changed, public_dns_name, deployment) where changed is True if a
        virtual machine was deleted
    """
    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    name = module.params.get('name')
    # NOTE(review): 'wait' and 'delete_empty_services' are read here but
    # never used below -- confirm whether that is intentional.
    delete_empty_services = module.params.get('delete_empty_services')

    changed = False

    deployment = None
    public_dns_name = None
    disk_names = []
    try:
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
    except AzureMissingException as e:
        pass  # no such deployment or service
    except AzureException as e:
        module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))

    # Delete deployment
    if deployment:
        changed = True
        try:
            # gather disk info so the disks can be deleted after the
            # deployment releases them
            # NOTE(review): 'results' is never used after this line.
            results = []
            for role in deployment.role_list:
                role_props = azure.get_role(name, deployment.name, role.role_name)
                if role_props.os_virtual_hard_disk.disk_name not in disk_names:
                    disk_names.append(role_props.os_virtual_hard_disk.disk_name)
        except AzureException as e:
            module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))

        try:
            result = azure.delete_deployment(name, deployment.name)
            _wait_for_completion(azure, result, wait_timeout, "delete_deployment")
        except AzureException as e:
            module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))

        # It's unclear when disks associated with terminated deployment get detached.
        # Thus, until the wait_timeout is reached, we continue to delete disks as they
        # become detached by polling the list of remaining disks and examining the state.
        try:
            _delete_disks_when_detached(azure, wait_timeout, disk_names)
        except (AzureException, TimeoutError) as e:
            module.fail_json(msg=str(e))

        try:
            # Now that the vm is deleted, remove the cloud service
            result = azure.delete_hosted_service(service_name=name)
            _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
        except AzureException as e:
            module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
        public_dns_name = urlparse(deployment.url).hostname

    return changed, public_dns_name, deployment
def get_azure_creds(module):
    """Return (subscription_id, management_cert_path).

    Each value is taken from the module parameters first, then from the
    AZURE_SUBSCRIPTION_ID / AZURE_CERT_PATH environment variables; the
    module fails when neither source provides it.
    """
    subscription_id = (module.params.get('subscription_id')
                       or os.environ.get('AZURE_SUBSCRIPTION_ID', None))
    if not subscription_id:
        module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")

    management_cert_path = (module.params.get('management_cert_path')
                            or os.environ.get('AZURE_CERT_PATH', None))
    if not management_cert_path:
        module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")

    return subscription_id, management_cert_path
def main():
    """Module entry point: build the spec, create the service client and
    dispatch on the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            ssh_cert_path=dict(),
            name=dict(),
            hostname=dict(),
            os_type=dict(default='linux', choices=['linux', 'windows']),
            location=dict(choices=AZURE_LOCATIONS),
            role_size=dict(choices=AZURE_ROLE_SIZES),
            subscription_id=dict(no_log=True),
            storage_account=dict(),
            management_cert_path=dict(),
            endpoints=dict(default='22'),
            user=dict(),
            password=dict(no_log=True),
            image=dict(),
            virtual_network_name=dict(default=None),
            state=dict(default='present'),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(default=600),
            wait_timeout_redirects=dict(default=300),
            auto_updates=dict(type='bool', default=False),
            enable_winrm=dict(type='bool', default=True),
        )
    )
    if not HAS_AZURE:
        module.fail_json(msg='azure python module required for this module')
    # create azure ServiceManagementService object
    subscription_id, management_cert_path = get_azure_creds(module)

    wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))

    if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
        # wrapper for handling redirects which the sdk <= 0.8.0 is not following
        azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
    else:
        azure = ServiceManagementService(subscription_id, management_cert_path)

    # NOTE(review): 'cloud_service_raw' is never used after this line.
    cloud_service_raw = None
    if module.params.get('state') == 'absent':
        (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)

    elif module.params.get('state') == 'present':
        # Changed is always set to true when provisioning new instances
        if not module.params.get('name'):
            module.fail_json(msg='name parameter is required for new instance')
        if not module.params.get('image'):
            module.fail_json(msg='image parameter is required for new instance')
        if not module.params.get('user'):
            module.fail_json(msg='user parameter is required for new instance')
        if not module.params.get('location'):
            module.fail_json(msg='location parameter is required for new instance')
        if not module.params.get('storage_account'):
            module.fail_json(msg='storage_account parameter is required for new instance')
        if not (module.params.get('password') or module.params.get('ssh_cert_path')):
            module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
        (changed, public_dns_name, deployment) = create_virtual_machine(module, azure)

    # NOTE(review): a state other than 'absent'/'present' would reach this
    # line with 'changed' undefined (NameError) -- confirm state is
    # validated upstream.
    module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
    """Proxy around a ServiceManagementService that retries any call
    failing with a "temporary redirect" error until it succeeds or the
    configured timeout elapses.

    Works around azure sdk <= 0.8.0 not following redirects itself.
    """

    def __init__(self, obj, wait_timeout):
        self.other = obj
        self.wait_timeout = wait_timeout

    def __getattr__(self, name):
        if not hasattr(self.other, name):
            raise AttributeError(name)
        target = getattr(self.other, name)

        def _proxy(*args, **kwargs):
            return self._wrap(target, args, kwargs)
        return _proxy

    def _wrap(self, func, args, kwargs):
        # Bound methods already carry the instance; anything else gets the
        # wrapped object passed explicitly as the first argument.
        if isinstance(func, MethodType):
            call = lambda: func(*args, **kwargs)
        else:
            call = lambda: func(self.other, *args, **kwargs)
        return self._handle_temporary_redirects(call)

    def _handle_temporary_redirects(self, f):
        deadline = time.time() + self.wait_timeout
        while deadline > time.time():
            try:
                return f()
            except AzureException as e:
                # Retry only on "temporary redirect"; re-raise anything else.
                if str(e).lower().find("temporary redirect") == -1:
                    raise e
                time.sleep(5)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
MakesmithAccessibleTech/MakesmithGroundControl | CalibrationWidgets/enterDistanceBetweenMotors.py | 2 | 1864 | from kivy.uix.gridlayout import GridLayout
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from UIElements.touchNumberInput import TouchNumberInput
from kivy.uix.popup import Popup
from kivy.app import App
import global_variables
class EnterDistanceBetweenMotors(GridLayout):
    '''
    Enter the manually measured distance between the motors.
    '''
    # Combined width of the motor assemblies (mm); subtracted from the
    # measured distance to get the usable center distance.
    MOTOR_WIDTH_MM = 40.4

    data = ObjectProperty(None)  # linked externally
    readyToMoveOn = ObjectProperty(None)  # callback to advance the wizard

    def on_Enter(self):
        '''
        This function runs when the step is entered
        '''
        self.data = App.get_running_app().data

    def switchUnits(self):
        '''
        Toggle the units button between mm and inches.
        '''
        if self.unitsBtnT.text == 'Units: mm':
            self.unitsBtnT.text = 'Units: inches'
        else:
            self.unitsBtnT.text = 'Units: mm'

    def enterValues(self):
        '''
        Manually enter the machine dimensions
        '''
        try:
            motorsDist = float(self.motorsDist.text)
            # convert from inches to mm if needed
            if self.unitsBtnT.text == 'Units: inches':
                motorsDist = motorsDist * 25.4
            # subtract off the width of the motors
            motorsDist = motorsDist - self.MOTOR_WIDTH_MM
            self.data.motorsDist = motorsDist
            self.loadNextStep()
        except Exception as e:
            # Bug fix: 'print e' is Python 2 only syntax; the
            # parenthesized form behaves identically under Python 2 and
            # also works under Python 3.
            print(e)

    def loadNextStep(self):
        '''
        Signal the wizard that this step is complete.
        '''
        self.readyToMoveOn()

    def on_Exit(self):
        '''
        This function run when the step is completed
        '''
        pass
| gpl-3.0 |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/route_selection_options/__init__.py | 1 | 18286 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class route_selection_options(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/route-selection-options. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Parameters relating to options for route selection
    """

    # NOTE(review): generated code -- do not hand-edit; regenerate from the
    # YANG model instead. The double-underscore slot names are name-mangled
    # by Python to _route_selection_options__config / __state.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "route-selection-options"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so value changes are
        # tracked and YANG metadata travels with the object.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        # Optional single positional argument: another binding instance whose
        # changed elements are copied into this one (``load`` controls how).
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG tree; falls back to the
        # absolute schema path when the instance has no parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "global",
                "route-selection-options",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/config (container)

        YANG Description: Configuration parameters relating to route selection
    options
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to route selection
    options
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh (unchanged) instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/state (container)

        YANG Description: State information for the route selection options
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information for the route selection options
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset the child container to a fresh (unchanged) instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public attribute access goes through the getter/setter pairs above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class route_selection_options(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/route-selection-options. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Parameters relating to options for route selection
    """

    # NOTE(review): generated code -- do not hand-edit. This second class
    # intentionally redefines (shadows) the binding above; pyangbind emits
    # one class per YANG module that defines this path (here the -l2 module).
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "route-selection-options"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so value changes are
        # tracked and YANG metadata travels with the object.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        # Optional single positional argument: another binding instance whose
        # changed elements are copied into this one (``load`` controls how).
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG tree; falls back to the
        # absolute schema path when the instance has no parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "global",
                "route-selection-options",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/config (container)

        YANG Description: Configuration parameters relating to route selection
    options
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to route selection
    options
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh (unchanged) instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/state (container)

        YANG Description: State information for the route selection options
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/route_selection_options/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information for the route selection options
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset the child container to a fresh (unchanged) instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public attribute access goes through the getter/setter pairs above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| apache-2.0 |
coen-hyde/dotfiles | libs/eb/lib/aws/requests/packages/charade/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for EUC-KR encoded (Korean) text."""

    def __init__(self):
        # Explicit base-class call kept for Python 2 compatibility.
        MultiByteCharSetProber.__init__(self)
        # State machine validates byte sequences against the EUC-KR model;
        # the distribution analyser scores character frequency plausibility.
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        # reset() must run last: it initialises state on the members above.
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "EUC-KR"
| mit |
cmvelo/ansible | lib/ansible/cli/adhoc.py | 5 | 7287 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
########################################################
import os
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.plugins import get_all_plugin_loaders
from ansible.utils.vars import load_extra_vars
from ansible.utils.vars import load_options_vars
from ansible.utils.unicode import to_unicode
from ansible.vars import VariableManager
# Reuse the process-wide Display instance created by the `ansible` entry
# point when available; fall back to a private instance (e.g. when this
# module is imported outside the CLI, such as in tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
########################################################
class AdHocCLI(CLI):
    ''' code behind ansible ad-hoc cli'''

    def parse(self):
        ''' create an options parser for bin/ansible '''

        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            runas_opts=True,
            inventory_opts=True,
            async_opts=True,
            output_opts=True,
            connect_opts=True,
            check_opts=True,
            runtask_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
        )

        # options unique to ansible ad-hoc
        self.parser.add_option('-a', '--args', dest='module_args',
            help="module arguments", default=C.DEFAULT_MODULE_ARGS)
        self.parser.add_option('-m', '--module-name', dest='module_name',
            help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
            default=C.DEFAULT_MODULE_NAME)

        self.options, self.args = self.parser.parse_args(self.args[1:])

        # the single remaining positional argument is the host pattern
        if len(self.args) != 1:
            raise AnsibleOptionsError("Missing target hosts")

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)

        return True

    def _play_ds(self, pattern, async_val, poll):
        """Build the in-memory play datastructure for the single ad-hoc task.

        ``async_val`` was formerly named ``async``; that became a reserved
        keyword in Python 3.7 and made this module a SyntaxError there. The
        task dict still carries the 'async' key the Play schema expects --
        it is built as a dict literal because ``async=...`` would not parse
        either. Callers pass the argument positionally, so the rename is
        backward compatible.
        """
        # these modules get their whole remaining command line as raw input
        check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
        return dict(
            name="Ansible Ad-Hoc",
            hosts=pattern,
            gather_facts='no',
            tasks=[{
                'action': {
                    'module': self.options.module_name,
                    'args': parse_kv(self.options.module_args, check_raw=check_raw),
                },
                'async': async_val,
                'poll': poll,
            }],
        )

    def run(self):
        ''' use Runner lib to do SSH things '''

        super(AdHocCLI, self).run()

        # only thing left should be host pattern
        pattern = to_unicode(self.args[0], errors='strict')

        # ignore connection password cause we are local
        if self.options.connection == "local":
            self.options.ask_pass = False

        sshpass = None
        becomepass = None
        vault_pass = None

        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        loader = DataLoader()

        if self.options.vault_password_file:
            # read vault_pass from a file
            vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
            loader.set_vault_password(vault_pass)
        elif self.options.ask_vault_pass:
            vault_pass = self.ask_vault_passwords()[0]
            loader.set_vault_password(vault_pass)

        variable_manager = VariableManager()
        variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
        variable_manager.options_vars = load_options_vars(self.options)

        inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
        variable_manager.set_inventory(inventory)

        no_hosts = False
        if len(inventory.list_hosts(pattern)) == 0:
            # Empty inventory
            display.warning("provided hosts list is empty, only localhost is available")
            no_hosts = True

        inventory.subset(self.options.subset)
        hosts = inventory.list_hosts(pattern)
        if len(hosts) == 0 and no_hosts is False:
            # Invalid limit
            raise AnsibleError("Specified --limit does not match any hosts")

        if self.options.listhosts:
            display.display('  hosts (%d):' % len(hosts))
            for host in hosts:
                display.display('    %s' % host)
            return 0

        if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
            err = "No argument passed to %s module" % self.options.module_name
            if pattern.endswith(".yml"):
                err = err + ' (did you mean to run ansible-playbook?)'
            raise AnsibleOptionsError(err)

        # dynamically load any plugins from the playbook directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join('.', obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
        play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

        # pick the stdout callback plugin for result rendering
        if self.callback:
            cb = self.callback
        elif self.options.one_line:
            cb = 'oneline'
        else:
            cb = 'minimal'

        run_tree = False
        if self.options.tree:
            C.DEFAULT_CALLBACK_WHITELIST.append('tree')
            C.TREE_DIR = self.options.tree
            run_tree = True

        # now create a task queue manager to execute the play
        self._tqm = None
        try:
            self._tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                options=self.options,
                passwords=passwords,
                stdout_callback=cb,
                run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                run_tree=run_tree,
            )

            result = self._tqm.run(play)
        finally:
            # always clean up the TQM workers and any vault-decrypted
            # temporary files, even when the run raised
            if self._tqm:
                self._tqm.cleanup()
            if loader:
                loader.cleanup_all_tmp_files()

        return result
| gpl-3.0 |
pekeler/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_nis.py | 58 | 1317 | from test import test_support
import unittest
import nis
class NisTests(unittest.TestCase):
    # NOTE: Python 2 module (old except/print syntax); kept byte-for-byte.

    def test_maps(self):
        # Walk every NIS map and verify that nis.match() agrees with
        # nis.cat() for one key per map.
        try:
            maps = nis.maps()
        except nis.error, msg:
            # NIS is probably not active, so this test isn't useful
            if test_support.verbose:
                print "Test Skipped:", msg
            # Can't raise TestSkipped as regrtest only recognizes the exception
            # import time.
            return
        try:
            # On some systems, this map is only accessible to the
            # super user
            maps.remove("passwd.adjunct.byname")
        except ValueError:
            pass

        done = 0
        for nismap in maps:
            mapping = nis.cat(nismap)
            for k, v in mapping.items():
                if not k:
                    continue
                if nis.match(k, nismap) != v:
                    self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
                else:
                    # just test the one key, otherwise this test could take a
                    # very long time
                    done = 1
                    break
            if done:
                break
def test_main():
    # Entry point used by regrtest: run the whole NIS suite.
    test_support.run_unittest(NisTests)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| apache-2.0 |
bjoshua/ansible | v1/ansible/inventory/group.py | 136 | 3395 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class Group(object):
    ''' a group of ansible hosts '''
    __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]

    def __init__(self, name=None):
        # depth is the group's distance from a root group; maintained by
        # add_child_group() as the hierarchy is built.
        self.depth = 0
        self.name = name
        self.hosts = []
        self.vars = {}
        self.child_groups = []
        self.parent_groups = []
        # lazily-computed, recursively-flattened host list; None = stale
        self._hosts_cache = None

        if self.name is None:
            raise Exception("group name is required")

    def add_child_group(self, group):
        """Attach *group* as a child, updating depths and parent links."""
        if self == group:
            raise Exception("can't add group to itself")

        # don't add if it's already there
        if group not in self.child_groups:
            self.child_groups.append(group)

            # update the depth of the child
            group.depth = max(self.depth + 1, group.depth)

            # update the depth of the grandchildren
            group._check_children_depth()

            # now add self to child's parent_groups list, but only if there
            # isn't already a group with the same name
            if self.name not in (g.name for g in group.parent_groups):
                group.parent_groups.append(self)

        self.clear_hosts_cache()

    def _check_children_depth(self):
        # Recursively push depth updates down the subtree.
        for group in self.child_groups:
            group.depth = max(self.depth + 1, group.depth)
            group._check_children_depth()

    def add_host(self, host):
        """Add *host* to this group and record the membership on the host."""
        self.hosts.append(host)
        host.add_group(self)
        self.clear_hosts_cache()

    def set_variable(self, key, value):
        """Set a group variable (overwrites any existing value for *key*)."""
        self.vars[key] = value

    def clear_hosts_cache(self):
        # Invalidate this group's cache and every ancestor's, since their
        # flattened host lists include ours.
        self._hosts_cache = None
        for g in self.parent_groups:
            g.clear_hosts_cache()

    def get_hosts(self):
        """Return the deduplicated host list of this group and all descendants."""
        if self._hosts_cache is None:
            self._hosts_cache = self._get_hosts()
        return self._hosts_cache

    def _get_hosts(self):
        # Depth-first: children's hosts first, then our own, preserving
        # first-seen order while dropping duplicates.
        hosts = []
        seen = set()
        for kid in self.child_groups:
            for kk in kid.get_hosts():
                if kk not in seen:
                    seen.add(kk)
                    hosts.append(kk)
        for mine in self.hosts:
            if mine not in seen:
                seen.add(mine)
                hosts.append(mine)
        return hosts

    def get_variables(self):
        """Return a shallow copy of this group's variables."""
        return self.vars.copy()

    def _get_ancestors(self):
        # Map of ancestor name -> Group, gathered transitively. Keyed by
        # name so shared ancestors are only collected once.
        results = {}
        for g in self.parent_groups:
            results[g.name] = g
            results.update(g._get_ancestors())
        return results

    def get_ancestors(self):
        """Return all transitive parent groups (deduplicated by name)."""
        return self._get_ancestors().values()
| gpl-3.0 |
noam09/deluge-telegramer | telegramer/include/future/backports/email/utils.py | 82 | 14270 | # Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Miscellaneous utilities."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import utils
from future.builtins import bytes, int, str
# Public API of this module (mirrors the Python 3 email.utils surface).
__all__ = [
    'collapse_rfc2231_value',
    'decode_params',
    'decode_rfc2231',
    'encode_rfc2231',
    'formataddr',
    'formatdate',
    'format_datetime',
    'getaddresses',
    'make_msgid',
    'mktime_tz',
    'parseaddr',
    'parsedate',
    'parsedate_tz',
    'parsedate_to_datetime',
    'unquote',
    ]
import os
import re
if utils.PY2:
    # Python 2's re module has no re.ASCII flag; define it as a harmless
    # no-op so patterns below can pass it unconditionally.
    re.ASCII = 0
import time
import base64
import random
import socket
from future.backports import datetime
from future.backports.urllib.parse import quote as url_quote, unquote as url_unquote
import warnings
from io import StringIO
from future.backports.email._parseaddr import quote
from future.backports.email._parseaddr import AddressList as _AddressList
from future.backports.email._parseaddr import mktime_tz
from future.backports.email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
from quopri import decodestring as _qdecode
# Intrapackage imports
from future.backports.email.encoders import _bencode, _qencode
from future.backports.email.charset import Charset
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = ''
CRLF = '\r\n'
# RFC 2231 separator between charset, language and value.
TICK = "'"

# Characters in a display name that force it to be double-quoted.
specialsre = re.compile(r'[][\\()<>@,:;".]')
# Characters that must be backslash-escaped inside a quoted display name.
escapesre = re.compile(r'[\\"]')

# How to figure out if we are processing strings that come from a byte
# source with undecodable characters.
_has_surrogates = re.compile(
    '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search
# How to deal with a string containing bytes before handing it to the
# application through the 'normal' interface.
def _sanitize(string):
# Turn any escaped bytes into unicode 'unknown' char.
original_bytes = string.encode('ascii', 'surrogateescape')
return original_bytes.decode('ascii', 'replace')
# Helpers

def formataddr(pair, charset='utf-8'):
    """The inverse of parseaddr(): turn a (realname, email_address) 2-tuple
    into a string usable in an RFC 2822 From, To or Cc header.

    A false realname yields the bare address. *charset* (a name or a
    Charset-like object with a header_encode method, default 'utf-8') is
    used to encode a non-ASCII realname.
    """
    name, address = pair
    # The address MUST (per RFC) be ascii; raise UnicodeEncodeError otherwise.
    address.encode('ascii')
    if not name:
        return address
    try:
        name.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII realname: RFC 2047-encode it with the given charset.
        if isinstance(charset, str):
            charset = Charset(charset)
        encoded = charset.header_encode(name)
        return "%s <%s>" % (encoded, address)
    quote_char = '"' if specialsre.search(name) else ''
    escaped = escapesre.sub(r'\\\g<0>', name)
    return '%s%s%s <%s>' % (quote_char, escaped, quote_char, address)
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    combined = COMMASPACE.join(fieldvalues)
    return _AddressList(combined).addresslist
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
def _format_timetuple_and_zone(timetuple, zone):
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
timetuple[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
zone)
def formatdate(timeval=None, localtime=False, usegmt=False):
    """Return an RFC 2822 date string, e.g. "Fri, 09 Nov 2001 01:08:47 -0000".

    *timeval* is a float as accepted by gmtime()/localtime(); defaults to
    the current time. With *localtime* true the result is relative to the
    local timezone (DST-aware). With *usegmt* true (and localtime false)
    the zone is written as the string "GMT" instead of "+0000", as HTTP
    requires.
    """
    # strftime() honours the locale, but RFC 2822 mandates English day and
    # month abbreviations -- so defer to _format_timetuple_and_zone.
    if timeval is None:
        timeval = time.time()
    if localtime:
        now = time.localtime(timeval)
        # Local offset from UTC, honouring DST when it is in effect.
        if time.daylight and now[-1]:
            offset = time.altzone
        else:
            offset = time.timezone
        hours, rem_seconds = divmod(abs(offset), 3600)
        # offset is seconds *west* of UTC while the printed zone is minutes
        # *east* of UTC, hence the inverted sign.
        sign = '-' if offset > 0 else '+'
        zone = '%s%02d%02d' % (sign, hours, rem_seconds // 60)
    else:
        now = time.gmtime(timeval)
        # Timezone offset is always -0000 (or "GMT" for HTTP headers).
        zone = 'GMT' if usegmt else '-0000'
    return _format_timetuple_and_zone(now, zone)
def format_datetime(dt, usegmt=False):
    """Turn a datetime into an RFC 2822 date string.

    With *usegmt* true, *dt* must be aware with a zero UTC offset and the
    zone is rendered as "GMT" (for HTTP headers) instead of "+0000".
    Naive datetimes render with the conventional "-0000" zone.
    """
    if usegmt:
        if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    elif dt.tzinfo is None:
        zone = '-0000'
    else:
        zone = dt.strftime("%z")
    return _format_timetuple_and_zone(dt.timetuple(), zone)
def make_msgid(idstring=None, domain=None):
    """Return a string suitable for an RFC 2822 compliant Message-ID, e.g.:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    *idstring*, if given, is mixed in to strengthen uniqueness; *domain*
    supplies the part after the '@' and defaults to the local FQDN.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    pid = os.getpid()
    randint = random.randrange(100000)
    idstring = '' if idstring is None else '.' + idstring
    if domain is None:
        domain = socket.getfqdn()
    return '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain)
def parsedate_to_datetime(data):
    """Parse an RFC 2822 date string into a datetime.

    The result is naive when the string carries no timezone, otherwise
    aware with the corresponding fixed-offset timezone.
    """
    parsed = list(_parsedate_tz(data))
    dtuple, tz = parsed[:-1], parsed[-1]
    if tz is None:
        return datetime.datetime(*dtuple[:6])
    return datetime.datetime(*dtuple[:6],
            tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
def parseaddr(addr):
    """Split *addr* into its (realname, email_address) parts.

    Returns ('', '') when nothing parseable is found.
    """
    parsed = _AddressList(addr).addresslist
    return parsed[0] if parsed else ('', '')
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
    """Strip one level of surrounding double quotes or angle brackets.

    Inside a double-quoted string, backslash escapes for '\\' and '"' are
    also undone. Anything else is returned unchanged.
    """
    if len(str) > 1:
        is_dquoted = str.startswith('"') and str.endswith('"')
        if is_dquoted:
            inner = str[1:-1]
            return inner.replace('\\\\', '\\').replace('\\"', '"')
        if str.startswith('<') and str.endswith('>'):
            return str[1:-1]
    return str
# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
    """Decode string according to RFC 2231.

    Splits on the first two tick (') separators; when there are fewer than
    two, the charset and language come back as None.
    """
    parts = s.split("'", 2)
    if len(parts) <= 2:
        return None, None, s
    return parts
def encode_rfc2231(s, charset=None, language=None):
    """Encode a parameter value according to RFC 2231.

    The value is always %-quoted (using *charset*, default ASCII);
    when a charset and/or language is supplied the result is prefixed
    with "charset'language'" per the extended-value syntax, otherwise
    the quoted string is returned bare.
    """
    quoted = url_quote(s, safe='', encoding=charset or 'ascii')
    if charset is None and language is None:
        return quoted
    return "%s'%s'%s" % (charset, language or '', quoted)
# Matches RFC 2231 continuation/extended parameter names such as
# "title*", "title*0" or "title*1*"; a trailing '*' marks a %-encoded
# segment.  re.ASCII keeps \w and the digit class ASCII-only.
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
    re.ASCII)
def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).

    Returns a new list of (name, value) pairs in which continuation
    segments (name*0, name*1, ...) have been joined into one value;
    extended (%-encoded) values come back as a nested
    (charset, language, quoted-value) 3-tuple suitable for
    collapse_rfc2231_value().
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is passed through untouched.
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' on the raw name marks a %-encoded segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            # Continuation/extended parameter: collect for later joining.
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            # Plain parameter: re-quote and keep as-is.
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # Decode as "latin-1", so the characters in s directly
                    # represent the percent-encoded octet values.
                    # collapse_rfc2231_value treats this as an octet sequence.
                    s = url_unquote(s, encoding="latin-1")
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    """Convert an RFC 2231 (charset, language, text) triple to a string.

    Values that are not such a triple are simply unquoted.  The text is
    reinterpreted as raw octets and decoded with the declared charset;
    an unknown charset falls back to returning the unquoted text.
    """
    if isinstance(value, tuple) and len(value) == 3:
        charset, _language, text = value
        # Interpret each character of *text* as one octet of the
        # original percent-decoded byte stream.
        octets = bytes(text, 'raw-unicode-escape')
        try:
            return str(octets, charset, errors)
        except LookupError:
            # charset is not a known codec.
            return unquote(text)
    return unquote(value)
#
# datetime doesn't provide a localtime function yet, so provide one. Code
# adapted from the patch in issue 9527. This may not be perfect, but it is
# better than not having it.
#
def localtime(dt=None, isdst=-1):
    """Return local time as an aware datetime object.

    If called without arguments, return current time.  Otherwise *dt*
    argument should be a datetime instance, and it is converted to the
    local time zone according to the system time zone database.  If *dt* is
    naive (that is, dt.tzinfo is None), it is assumed to be in local time.
    In this case, a positive or zero value for *isdst* causes localtime to
    presume initially that summer time (for example, Daylight Saving Time)
    is or is not (respectively) in effect for the specified time.  A
    negative value for *isdst* causes the localtime() function to attempt
    to divine whether summer time is in effect for the specified time.
    """
    if dt is None:
        return datetime.datetime.now(datetime.timezone.utc).astimezone()
    if dt.tzinfo is not None:
        # Already aware: just convert to the local zone.
        return dt.astimezone()
    # We have a naive datetime.  Convert to a (localtime) timetuple and pass to
    # system mktime together with the isdst hint.  System mktime will return
    # seconds since epoch.
    tm = dt.timetuple()[:-1] + (isdst,)
    seconds = time.mktime(tm)
    localtm = time.localtime(seconds)
    try:
        # Platforms exposing tm_gmtoff/tm_zone give the offset directly.
        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
        tz = datetime.timezone(delta, localtm.tm_zone)
    except AttributeError:
        # Compute UTC offset and compare with the value implied by tm_isdst.
        # If the values match, use the zone name implied by tm_isdst.
        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
        dst = time.daylight and localtm.tm_isdst > 0
        gmtoff = -(time.altzone if dst else time.timezone)
        if delta == datetime.timedelta(seconds=gmtoff):
            tz = datetime.timezone(delta, time.tzname[dst])
        else:
            # Offset disagrees with tm_isdst: use an unnamed fixed offset.
            tz = datetime.timezone(delta)
    return dt.replace(tzinfo=tz)
| gpl-3.0 |
ptcrypto/pycoin | pycoin/services/insight.py | 12 | 4901 | # provide support to insight API servers
# see also https://github.com/bitpay/insight-api
import decimal
import json
import logging
import io
try:
from urllib2 import HTTPError, urlopen
from urllib import urlencode
except ImportError:
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.parse import urlencode
from pycoin.block import BlockHeader
from pycoin.convention import btc_to_satoshi
from pycoin.encoding import double_sha256
from pycoin.merkle import merkle
from pycoin.serialize import b2h, b2h_rev, h2b, h2b_rev
from pycoin.tx.script import tools
from pycoin.tx import Spendable, Tx, TxIn, TxOut
class InsightService(object):
    """Blockchain query/broadcast provider backed by an insight-api server.

    See https://github.com/bitpay/insight-api for the HTTP API wrapped
    here.  All methods perform blocking HTTP requests.
    """

    def __init__(self, base_url):
        # Strip trailing slashes so the URL joins below stay clean.
        while base_url[-1] == '/':
            base_url = base_url[:-1]
        self.base_url = base_url

    def get_blockchain_tip(self):
        """Return the binary hash of the server's last known block."""
        URL = "%s/api/status?q=getLastBlockHash" % self.base_url
        d = urlopen(URL).read().decode("utf8")
        r = json.loads(d)
        return h2b_rev(r.get("lastblockhash"))

    def get_blockheader(self, block_hash):
        """Return the BlockHeader for block_hash (None on verification failure)."""
        return self.get_blockheader_with_transaction_hashes(block_hash)[0]

    def get_blockheader_with_transaction_hashes(self, block_hash):
        """Fetch a block; return (BlockHeader, list of tx hashes).

        Both the block's own hash and the merkle root of the reported
        transactions are verified; (None, None) is returned when either
        check fails.
        """
        URL = "%s/api/block/%s" % (self.base_url, b2h_rev(block_hash))
        r = json.loads(urlopen(URL).read().decode("utf8"))
        version = r.get("version")
        previous_block_hash = h2b_rev(r.get("previousblockhash"))
        merkle_root = h2b_rev(r.get("merkleroot"))
        timestamp = r.get("time")
        difficulty = int(r.get("bits"), 16)
        nonce = int(r.get("nonce"))
        tx_hashes = [h2b_rev(tx_hash) for tx_hash in r.get("tx")]
        blockheader = BlockHeader(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)
        if blockheader.hash() != block_hash:
            # Server returned a block that doesn't hash to what we asked for.
            return None, None
        calculated_hash = merkle(tx_hashes, double_sha256)
        if calculated_hash != merkle_root:
            # Transaction list doesn't match the header's merkle root.
            return None, None
        blockheader.height = r.get("height")
        return blockheader, tx_hashes

    def get_block_height(self, block_hash):
        """Return the height of the block with the given hash."""
        return self.get_blockheader_with_transaction_hashes(block_hash)[0].height

    def get_tx(self, tx_hash):
        """Fetch and return the Tx with the given hash (None on hash mismatch)."""
        URL = "%s/api/tx/%s" % (self.base_url, b2h_rev(tx_hash))
        r = json.loads(urlopen(URL).read().decode("utf8"))
        tx = tx_from_json_dict(r)
        if tx.hash() == tx_hash:
            return tx
        return None

    def get_tx_confirmation_block(self, tx_hash):
        """Return the hash of the block that confirmed the transaction."""
        return self.get_tx(tx_hash).confirmation_block_hash

    def spendables_for_address(self, bitcoin_address):
        """
        Return a list of Spendable objects for the
        given bitcoin address.
        """
        URL = "%s/api/addr/%s/utxo" % (self.base_url, bitcoin_address)
        r = json.loads(urlopen(URL).read().decode("utf8"))
        spendables = []
        for u in r:
            coin_value = btc_to_satoshi(str(u.get("amount")))
            script = h2b(u.get("scriptPubKey"))
            previous_hash = h2b_rev(u.get("txid"))
            previous_index = u.get("vout")
            spendables.append(Spendable(coin_value, script, previous_hash, previous_index))
        return spendables

    def spendables_for_addresses(self, bitcoin_addresses):
        """Return the concatenated Spendables for all given addresses."""
        spendables = []
        for addr in bitcoin_addresses:
            spendables.extend(self.spendables_for_address(addr))
        return spendables

    def send_tx(self, tx):
        """Broadcast the transaction to the server.

        Raises ValueError with the server's message when the server
        rejects the transaction (HTTP 400).
        """
        s = io.BytesIO()
        tx.stream(s)
        tx_as_hex = b2h(s.getvalue())
        data = urlencode(dict(rawtx=tx_as_hex)).encode("utf8")
        URL = "%s/api/tx/send" % self.base_url
        try:
            d = urlopen(URL, data=data).read()
            return d
        except HTTPError as err:
            if err.code == 400:
                # Insight reports rejection reasons with a 400 status.
                raise ValueError(err.readline())
            raise err
def tx_from_json_dict(r):
    """Build a pycoin Tx from an insight-api JSON transaction dict.

    The containing block's hash (reversed to binary), or None/missing,
    is attached to the returned object as ``confirmation_block_hash``.
    """
    txs_in = []
    for vin in r.get("vin"):
        if "coinbase" in vin:
            # Coinbase inputs spend the null outpoint.
            txs_in.append(TxIn(b'\0' * 32, 4294967295,
                               h2b(vin.get("coinbase")),
                               vin.get("sequence")))
        else:
            txs_in.append(TxIn(h2b_rev(vin.get("txid")),
                               vin.get("vout"),
                               tools.compile(vin.get("scriptSig").get("asm")),
                               vin.get("sequence")))
    txs_out = [
        TxOut(btc_to_satoshi(decimal.Decimal(vout.get("value"))),
              tools.compile(vout.get("scriptPubKey").get("asm")))
        for vout in r.get("vout")
    ]
    tx = Tx(r.get("version"), txs_in, txs_out, r.get("locktime"))
    block_hash = r.get("blockhash")
    tx.confirmation_block_hash = h2b_rev(block_hash) if block_hash else block_hash
    return tx
| mit |
mSenyor/sl4a | python/src/Lib/ntpath.py | 60 | 17131 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
# Names exported by "from ntpath import *" (and therefore visible as
# the public os.path API when this module is selected).
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
           "extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'            # current directory
pardir = '..'           # parent directory
extsep = '.'            # extension separator
sep = '\\'              # primary path separator on Windows
pathsep = ';'           # separator in PATH-style environment values
altsep = '/'            # alternate separator also accepted by Windows
defpath = '.;C:\\bin'   # default executable search path
if 'ce' in sys.builtin_module_names:
    # Windows CE has no drive-relative default search path.
    defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
    # OS/2 w/ VACPP
    altsep = '/'
devnull = 'nul'         # null device name on Windows
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
    """Normalize the case of a pathname.

    Lower-cases every character and turns forward slashes into
    backslashes.
    """
    return s.lower().replace("/", "\\")
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Test whether a path is absolute."""
    # A path is absolute when, after removing any drive letter, it
    # starts with a slash or backslash.
    rest = splitdrive(s)[1]
    return rest != '' and rest[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.

    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0  # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far.  However, it's more
            # complicated if path begins with a drive letter:
            #     1. join('c:', '/a') == 'c:/a'
            #     2. join('c:/', '/a') == 'c:/a'
            # But
            #     3. join('c:/a', '/b') == '/b'
            #     4. join('c:', 'd:/') = 'd:/'
            #     5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1
        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                # path is a bare drive letter ("c:"): no separator wanted,
                # the result stays drive-relative.
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'
    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into a (drive, tail) 2-tuple.

    The drive is a leading "X:" when present, otherwise ''; either
    part may be empty and drive + tail always reproduces *p*.
    """
    has_drive = p[1:2] == ':'
    if has_drive:
        return p[:2], p[2:]
    return '', p
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.

    Return a 2-tuple (unc, rest); either part may be empty.
    A non-empty unc has the form '//host/mount' (or the backslashed
    equivalent) and unc + rest always equals the input path.  Paths
    containing drive letters never have a UNC part.
    """
    if p[1:2] == ':':
        # Drive letter present, so no UNC part.
        return '', p
    if p[0:2] not in ('//', '\\\\'):
        return '', p
    # This is a UNC path:
    # \\machine\mountpoint\directories...
    # The \\machine\mountpoint prefix plays the role of a drive letter.
    normp = normcase(p)
    first_sep = normp.find('\\', 2)
    if first_sep == -1:
        # No share component: treat as an illegal UNC path.
        return ("", p)
    second_sep = normp.find('\\', first_sep + 1)
    if second_sep == -1:
        second_sep = len(p)
    return p[:second_sep], p[second_sep:]
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname into (head, tail).

    tail is everything after the final slash; either part may be
    empty.  Trailing slashes are stripped from head unless head is
    nothing but slashes, and join(head, tail) reproduces the input.
    """
    d, p = splitdrive(p)
    # Find the index just past the last slash (of either flavor).
    cut = len(p)
    while cut and p[cut - 1] not in '/\\':
        cut -= 1
    head, tail = p[:cut], p[cut:]
    # Strip trailing slashes from head unless head is all slashes.
    stripped = head.rstrip('/\\')
    return d + (stripped or head), tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation in genericpath, passing this
    # module's separator conventions.
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the shared implementation's docstring.
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Return the final component of a pathname."""
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Return the directory component of a pathname."""
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for a symbolic link.

    Always returns False: the platforms this module targets
    (WindowsNT/95 and OS/2) have no symlink support here.
    """
    return False
# alias exists to lexists
# (islink() is always False here, so a non-following "lexists" is the
# same operation as exists.)
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)."""
    unc, rest = splitunc(path)
    if unc:
        # A UNC mount point may be followed by at most one separator.
        return rest in ("", "/", "\\")
    tail = splitdrive(path)[1]
    return len(tail) == 1 and tail[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""
    # Python-2-only deprecation warning (this API was removed in 3.x).
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.")
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip, matching os.path.walk.
        return
    func(arg, top, names)
    # Recurse only into subdirectories still present in names after the
    # callback had a chance to filter the list in place.
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # Find the end of the user-name part ('~' or '~user').
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Environment lookup order: HOME, then USERPROFILE, then
    # HOMEDRIVE+HOMEPATH; give up if none of them is set.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1:  # ~user
        # Replace the last component of the current user's home with the
        # requested user name.
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.

    Unknown variables are left unchanged."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    # NOTE: the loop below repeatedly re-slices `path` and resets
    # `index`; the trailing `index = index + 1` at the bottom is part of
    # every branch's accounting.
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                # Unterminated quote: keep the rest verbatim and stop.
                res = res + path
                index = pathlen - 1
        elif c == '%':  # variable or '%'
            if path[index + 1:index + 2] == '%':
                # '%%' collapses to a literal '%'.
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # No closing '%': keep the text verbatim.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        # Unknown variable: leave %var% unchanged.
                        res = res + '%' + var + '%'
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                # '$$' collapses to a literal '$'.
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # No closing '}': keep the text verbatim.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $var: consume the run of variable-name characters.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    # Step back so the terminating character is
                    # re-examined by the outer loop.
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here.  If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file).  It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter.  This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b.  It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + "\\"
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + "\\"
            path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] in ('.', ''):
            # Drop '.' components and empty components (from '\\\\').
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # A real component precedes: cancel it with the '..'.
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # '..' at an absolute root goes nowhere: drop it.
                del comps[i]
            else:
                # Leading '..' on a relative path must be kept.
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
# Return an absolute path.
try:
    from nt import _getfullpathname
except ImportError:  # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        # No Windows API available: anchor at the cwd and normalize.
        if not isabs(path):
            path = join(os.getcwd(), path)
        return normpath(path)
else:  # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path:  # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass  # Bad path - return unchanged.
        else:
            path = os.getcwd()
        return normpath(path)

# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the platform field; presumably 2 means the
# NT product line here (VER_PLATFORM_WIN32_NT) -- see the sys docs.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def relpath(path, start=curdir):
    """Return a version of *path* relative to *start*.

    Raises ValueError for an empty path, when the two paths are on
    different drives, or when exactly one of them is a UNC path.
    """
    if not path:
        raise ValueError("no path specified")
    start_parts = abspath(start).split(sep)
    path_parts = abspath(path).split(sep)
    if start_parts[0].lower() != path_parts[0].lower():
        unc_path, _ = splitunc(path)
        unc_start, _ = splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_parts[0], start_parts[0]))
    # Length of the shared prefix (compared case-insensitively).
    common = 0
    limit = min(len(start_parts), len(path_parts))
    while (common < limit and
           start_parts[common].lower() == path_parts[common].lower()):
        common += 1
    rel_parts = [pardir] * (len(start_parts) - common) + path_parts[common:]
    if not rel_parts:
        return curdir
    return join(*rel_parts)
| apache-2.0 |
pombreda/swtoolkit | test/signing_test.py | 18 | 4436 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for code signing. This is a LARGE test."""
import sys
import TestFramework
def TestSConstruct(scons_globals):
    """Test SConstruct file.

    Args:
        scons_globals: Global variables dict from the SConscript file.
    """
    # Get globals from SCons
    Environment = scons_globals['Environment']
    # Environment with the code_signing tool under test enabled.
    env = Environment(
        tools=['component_setup', 'target_platform_windows', 'code_signing'],
        BUILD_TYPE='sign',
        BUILD_TYPE_DESCRIPTION='Signing build',
    )
    env.Append(
        BUILD_GROUPS=['default'],
        BUILD_COMPONENTS=['SConscript'],
    )
    # NOTE(review): BuildComponents is not read from scons_globals like
    # Environment above -- presumably the framework injects it into the
    # execution namespace; confirm against TestFramework.
    BuildComponents([env])
def TestSConscript(scons_globals):
    """Test SConscript file.

    Args:
        scons_globals: Global variables dict from the SConscript file.
    """
    # Pull in the construction environment exported by the SConstruct.
    scons_globals['Import']('env')
    env = scons_globals['env']
    program = env.Program('hello.exe', 'hello.c')
    # Variant with no certificate: signing is a pass-through.
    env.SignedBinary('hello_unsigned.exe', program)
    # Variant signed with the fake certificate written by the test.
    env.SignedBinary(
        'hello_signed.exe', program,
        CERTIFICATE_PATH='fake.pfx',
        CERTIFICATE_PASSWORD='obscure')
# C source for the tiny program the test builds and signs.  The "\\n" is
# escaped so the generated .c file contains a newline escape sequence,
# not a literal newline.
hello_c_contents = """
#include <stdio.h>
int main() {
printf("Hello, world!\\n");
}
"""
def main():
    """Build and exercise code signing end to end.

    Skipped (returning 0) on platforms without signing support;
    otherwise pass/fail is reported through the test framework.
    """
    test = TestFramework.TestFramework()

    platforms_with_signing = ['win32', 'cygwin']
    if sys.platform not in platforms_with_signing:
        msg = 'Platform %s does not support signing; skipping test.\n'
        test.skip_test(msg % repr(sys.platform))
        return 0

    test.subdir('signing')
    base = 'signing/'
    base_out = base + 'scons-out/sign/obj/'

    # Fixtures: fake certificate, build scripts, and the C source.
    test.FakeWindowsPFX(base + 'fake.pfx')
    test.WriteSConscript(base + 'SConstruct', TestSConstruct)
    test.WriteSConscript(base + 'SConscript', TestSConscript)
    test.write(base + 'hello.c', hello_c_contents)

    # Run SCons.
    test.run(chdir=base)

    # Check for test output.
    test.must_exist(base_out + 'hello.exe')
    test.must_exist(base_out + 'hello_unsigned.exe')
    test.must_exist(base_out + 'hello_signed.exe')

    # Test output must be runnable.
    test.run(program=test.workpath(base_out + 'hello.exe'),
             stdout='Hello, world!\n')
    test.run(program=test.workpath(base_out + 'hello_signed.exe'),
             stdout='Hello, world!\n')
    test.run(program=test.workpath(base_out + 'hello_unsigned.exe'),
             stdout='Hello, world!\n')

    # By default signing is a pass thru, so these two should match.
    if (test.read(base_out + 'hello.exe') !=
        test.read(base_out + 'hello_unsigned.exe')):
        test.fail_test()
    # Signed version should not match.
    if (test.read(base_out + 'hello.exe') ==
        test.read(base_out + 'hello_signed.exe')):
        test.fail_test()

    # Cover certificate with junk so the signing step must fail.
    test.write(base + 'fake.pfx', 'blahblah!\n')
    # Run SCons, expecting failure.
    test.run(chdir=base, stderr=None, status=2)
    test.fail_test(test.stderr().find('SignTool Error') == -1)

    test.pass_test()
| bsd-3-clause |
interline/xhtml2pdf | xhtml2pdf/turbogears.py | 99 | 1449 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
    """Decorator factory: convert a controller's (X)HTML output to PDF.

    The wrapped controller's output is fed through pisa; on success the
    response Content-Type is set to *content_type* and, when *filename*
    is given, a Content-Disposition attachment header is added.  If PDF
    generation fails the original output is returned unchanged.
    """
    def entangle(func):
        def decorated(func, *args, **kw):
            # Run the controller to obtain its (X)HTML output.
            output = func(*args, **kw)
            dst = StringIO.StringIO()
            result = pisa.CreatePDF(
                StringIO.StringIO(output),
                dst
                )
            if not result.err:
                # Conversion succeeded: serve the PDF bytes instead.
                cherrypy.response.headers["Content-Type"] = content_type
                if filename:
                    cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
                output = dst.getvalue()
            return output
        return decorated
    return weak_signature_decorator(entangle)

# Alternate spelling kept for backwards compatibility.
topdf = to_pdf
| apache-2.0 |
alistairlow/tensorflow | tensorflow/contrib/signal/python/ops/spectral_ops.py | 19 | 12619 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.ops import reconstruction_ops
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def stft(signals, frame_length, frame_step, fft_length=None,
         window_fn=functools.partial(window_ops.hann_window, periodic=True),
         pad_end=False, name=None):
  """Computes the [Short-time Fourier Transform][stft] of `signals`.

  Implemented with GPU-compatible ops and supports gradients.

  Args:
    signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
      If not provided, uses the smallest power of 2 enclosing `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    pad_end: Whether to pad the end of `signals` with zeros when the provided
      frame length and step produces a frame that lies partially past its end.
    name: An optional name for the operation.

  Returns:
    A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values
    where `fft_unique_bins` is `fft_length // 2 + 1` (the unique components
    of the FFT).

  Raises:
    ValueError: If `signals` is not at least rank 1, `frame_length` is
      not scalar, or `frame_step` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'stft', [signals, frame_length,
                                     frame_step]):
    # Validate ranks: signals must be at least rank 1; the frame
    # parameters must be scalars.
    signals = ops.convert_to_tensor(signals, name='signals')
    signals.shape.with_rank_at_least(1)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)

    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')

    frames = shape_ops.frame(
        signals, frame_length, frame_step, pad_end=pad_end)

    # Optionally apply the analysis window to every frame.
    if window_fn is not None:
      frames *= window_fn(frame_length, dtype=frames.dtype)

    # rfft yields the (fft_length/2 + 1) unique components of the FFT of
    # the real windowed frames.
    return spectral_ops.rfft(frames, [fft_length])
def inverse_stft_window_fn(frame_step,
                           forward_window_fn=functools.partial(
                               window_ops.hann_window, periodic=True),
                           name=None):
  """Generates a window function that can be used in `inverse_stft`.

  Constructs a window that is equal to the forward window with a further
  pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
  `forward_window_fn` in the case where it would produce an exact inverse.

  See examples in `inverse_stft` documentation for usage.

  Args:
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    forward_window_fn: window_fn used in the forward transform, `stft`.
    name: An optional name for the operation.

  Returns:
    A callable that takes a window length and a `dtype` keyword argument and
    returns a `[window_length]` `Tensor` of samples in the provided datatype.
    The returned window is suitable for reconstructing original waveform in
    inverse_stft.
  """
  with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)
  def inverse_stft_window_fn_inner(frame_length, dtype):
    """Computes a window that can be used in `inverse_stft`.

    Args:
      frame_length: An integer scalar `Tensor`. The window length in samples.
      dtype: Data type of waveform passed to `stft`.

    Returns:
      A window suitable for reconstructing original waveform in `inverse_stft`.

    Raises:
      ValueError: If `frame_length` is not scalar, `forward_window_fn` is not
        a callable that takes a window length and a `dtype` keyword argument
        and returns a `[window_length]` `Tensor` of samples in the provided
        datatype, or `frame_step` is not scalar.
    """
    # Reuses the outer `name` so both scopes group under the same op name.
    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
      frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
      frame_length.shape.assert_has_rank(0)
      # Use equation 7 from Griffin + Lim.
      forward_window = forward_window_fn(frame_length, dtype=dtype)
      denom = math_ops.square(forward_window)
      overlaps = -(-frame_length // frame_step)  # Ceiling division.
      # Pad the squared window out to a whole number of frame_step hops,
      # sum the overlapping contributions per step, then broadcast that sum
      # back across every overlap position.
      denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
      denom = array_ops.reshape(denom, [overlaps, frame_step])
      denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
      denom = array_ops.tile(denom, [overlaps, 1])
      denom = array_ops.reshape(denom, [overlaps * frame_step])
      return forward_window / denom[:frame_length]
  return inverse_stft_window_fn_inner
def inverse_stft(stfts,
                 frame_length,
                 frame_step,
                 fft_length=None,
                 window_fn=functools.partial(window_ops.hann_window,
                                             periodic=True),
                 name=None):
  """Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.

  To reconstruct an original waveform, a complementary window function should
  be used in inverse_stft. Such a window function can be constructed with
  tf.contrib.signal.inverse_stft_window_fn.

  Example:

  ```python
  frame_length = 400
  frame_step = 160
  waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
  stft = tf.contrib.signal.stft(waveform, frame_length, frame_step)
  inverse_stft = tf.contrib.signal.inverse_stft(
      stft, frame_length, frame_step,
      window_fn=tf.contrib.signal.inverse_stft_window_fn(frame_step))
  ```

  If a custom window_fn is used in stft, it must be passed to
  inverse_stft_window_fn:

  ```python
  frame_length = 400
  frame_step = 160
  window_fn = functools.partial(window_ops.hamming_window, periodic=True),
  waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
  stft = tf.contrib.signal.stft(
      waveform, frame_length, frame_step, window_fn=window_fn)
  inverse_stft = tf.contrib.signal.inverse_stft(
      stft, frame_length, frame_step,
      window_fn=tf.contrib.signal.inverse_stft_window_fn(
         frame_step, forward_window_fn=window_fn))
  ```

  Implemented with GPU-compatible ops and supports gradients.

  Args:
    stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
      representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
      is `fft_length // 2 + 1`
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT that produced
      `stfts`. If not provided, uses the smallest power of 2 enclosing
      `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `Tensor` of `float32` signals representing the inverse
    STFT for each input STFT in `stfts`.

  Raises:
    ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
      `frame_step` is not scalar, or `fft_length` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'inverse_stft', [stfts]):
    stfts = ops.convert_to_tensor(stfts, name='stfts')
    stfts.shape.with_rank_at_least(2)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)
    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
      fft_length.shape.assert_has_rank(0)
    # irfft yields frames of length fft_length, not frame_length.
    real_frames = spectral_ops.irfft(stfts, [fft_length])
    # frame_length may be larger or smaller than fft_length, so we pad or
    # truncate real_frames to frame_length.
    frame_length_static = tensor_util.constant_value(frame_length)
    # If we don't know the shape of real_frames's inner dimension, pad and
    # truncate to frame_length.
    if (frame_length_static is None or
        real_frames.shape.ndims is None or
        real_frames.shape[-1].value is None):
      real_frames = real_frames[..., :frame_length]
      real_frames_rank = array_ops.rank(real_frames)
      real_frames_shape = array_ops.shape(real_frames)
      # Pad only the last dimension; all leading dimensions get [0, 0].
      paddings = array_ops.concat(
          [array_ops.zeros([real_frames_rank - 1, 2],
                           dtype=frame_length.dtype),
           [[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
      real_frames = array_ops.pad(real_frames, paddings)
    # We know real_frames's last dimension and frame_length statically. If they
    # are different, then pad or truncate real_frames to frame_length.
    elif real_frames.shape[-1].value > frame_length_static:
      real_frames = real_frames[..., :frame_length_static]
    elif real_frames.shape[-1].value < frame_length_static:
      pad_amount = frame_length_static - real_frames.shape[-1].value
      real_frames = array_ops.pad(real_frames,
                                  [[0, 0]] * (real_frames.shape.ndims - 1) +
                                  [[0, pad_amount]])
    # The above code pads the inner dimension of real_frames to frame_length,
    # but it does so in a way that may not be shape-inference friendly.
    # Restore shape information if we are able to.
    if frame_length_static is not None and real_frames.shape.ndims is not None:
      real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
                            [frame_length_static])
    # Optionally window and overlap-add the inner 2 dimensions of real_frames
    # into a single [samples] dimension.
    if window_fn is not None:
      window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
      real_frames *= window
    return reconstruction_ops.overlap_and_add(real_frames, frame_step)
def _enclosing_power_of_two(value):
  """Return 2**N for integer N such that 2**N >= value."""
  value_static = tensor_util.constant_value(value)
  if value_static is not None:
    # `value` is statically known: fold the result into a graph constant.
    # NOTE(review): np.log(x)/np.log(2.0) is subject to float rounding;
    # np.log2 would be marginally more accurate -- confirm before changing,
    # since the result is baked into graph constants.
    return constant_op.constant(
        int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
  # Dynamic case: compute the same expression with graph ops.
  return math_ops.cast(
      math_ops.pow(2.0, math_ops.ceil(
          math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
      value.dtype)
| apache-2.0 |
mistercrunch/airflow | airflow/migrations/versions/561833c1c74b_add_password_column_to_user.py | 8 | 1266 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add password column to user
Revision ID: 561833c1c74b
Revises: 40e67319e3a9
Create Date: 2015-11-30 06:51:25.872557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '561833c1c74b'
down_revision = '40e67319e3a9'
branch_labels = None
depends_on = None
def upgrade():  # noqa: D103
    # Add the nullable ``password`` column to the ``user`` table.
    op.add_column('user', sa.Column('password', sa.String(255)))
def downgrade():  # noqa: D103
    # Drop the ``password`` column added by this revision's upgrade().
    op.drop_column('user', 'password')
| apache-2.0 |
c0yote/AbstractControls | dep/FakeIt/generate_fakeit_single_header.py | 2 | 2982 | import io
import os
import sys
import re
import datetime
# Root of the FakeIt source tree; the script is expected to run from there.
fakeitPath = "."
# Matches local includes of the form: #include "header.hpp"
includesMatcher = re.compile( r'\s*#include\s*"(.*)"' )
# Matches a `#pragma once` guard line.
pragmaOnceMatcher = re.compile( r'\s*#pragma\s*once\s*' )
# Headers already inlined into the output (each header is expanded once).
processedHeaders = set([])
def write_line(out, line):
    """Write *line* to the stream *out*, followed by a newline."""
    terminated = line + "\n"
    out.write(terminated)
def parseFile(out, config, filename):
    """Recursively inline *filename* and its local includes into *out*.

    The header is looked up first under ``config/<config>/`` and then under
    ``include/``; files that exist in neither place are silently skipped.
    Each header is expanded at most once, tracked via the module-level
    ``processedHeaders`` set.
    """
    print( "parsing " + filename )
    if os.path.exists( os.path.join( fakeitPath, 'config' , config, filename) ):
        filePath = os.path.join( fakeitPath, 'config' , config, filename)
    elif os.path.exists( os.path.join( fakeitPath, 'include' , filename) ):
        filePath = os.path.join( fakeitPath, 'include' , filename)
    else:
        return
    write_line(out, "// #included from: " + filename )
    # Bug fix: the handle was previously opened and never closed; use a
    # context manager so it is released even if an exception propagates
    # from a recursive parseFile() call.
    with open(filePath, 'r') as f:
        for line in f:
            m = includesMatcher.match( line )
            if m:
                header = m.group(1)
                if not header in processedHeaders:
                    processedHeaders.add( header )
                    parseFile(out, config, header)
            else:
                write_line(out, line.rstrip() )
def writeHeaderComment(out, config):
    """Emit the generated-file banner comment at the top of the output."""
    banner = [
        "/*",
        " * FakeIt - A Simplified C++ Mocking Framework",
        " * Copyright (c) Eran Pe'er 2013",
        " * Generated: {0}".format( datetime.datetime.now()),
        " * Distributed under the MIT License. Please refer to the LICENSE file at:",
        " * https://github.com/eranpeer/FakeIt",
        " */",
    ]
    for banner_line in banner:
        write_line(out, banner_line)
def generateFakeitIncludeFile(out, config):
    # Expand the top-level fakeit.hpp, with all local includes inlined,
    # into the stream `out` for the given configuration.
    parseFile(out, config, 'fakeit.hpp' )
# Matches C/C++ `//` and `/* ... */` comments, additionally capturing
# whether the match begins at the start of a line and/or runs to its end.
comment_re = re.compile(
    r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
    re.DOTALL | re.MULTILINE
)

def comment_replacer(match):
    """Return the replacement text for one comment match."""
    at_line_start, body, at_line_end = match.group(1, 2, 3)
    if body is None:
        # `//` comment: remove it entirely.
        return ''
    if at_line_start is not None or at_line_end is not None:
        # Block comment flush against the start or end of a line: remove it.
        return ''
    if '\n' in body:
        # Multi-line block comment between tokens: keep one line break.
        return '\n'
    # Single-line block comment between tokens: collapse to one space.
    return ' '

def remove_comments(text):
    """Strip C/C++ comments from *text*, preserving the code layout."""
    return comment_re.sub(comment_replacer, text)
def remove_pragma_once(text):
    """Replace any `#pragma once` guard in *text* with a blank line."""
    cleaned = pragmaOnceMatcher.sub('\n', text)
    return cleaned
def createFakeitFileText(config):
    """Build and return the single-header fakeit.hpp text for *config*."""
    buffer = io.StringIO()
    generateFakeitIncludeFile(buffer, config)
    merged = buffer.getvalue()
    merged = remove_comments(merged)
    return remove_pragma_once(merged)
def createFakeitFile(config,text):
    """Write *text* to single_header/<config>/fakeit.hpp, creating directories.

    Args:
        config: Name of the configuration subdirectory.
        text: Pre-generated single-header content (comments already stripped).
    """
    # Robustness fix: tolerate an already existing output directory so the
    # generator can be re-run without manual cleanup (exist_ok requires
    # Python 3, which this script already depends on via io.StringIO).
    os.makedirs(os.path.join( fakeitPath, 'single_header', config), exist_ok=True)
    outputPath = os.path.join( fakeitPath, 'single_header', config, 'fakeit.hpp' )
    # Use a context manager so the handle is closed even if a write fails.
    with open( outputPath, 'w' ) as out:
        write_line(out, "#pragma once" )
        writeHeaderComment(out, config)
        out.write(text)
# Script entry point: the target configuration name is the first CLI argument.
config = sys.argv[1]
# Generate the merged single-header text, then write it to disk.
text = createFakeitFileText(config)
createFakeitFile(config,text)
| mit |
jayoshih/content-curation | contentcuration/contentcuration/dev_settings.py | 2 | 1411 | # flake8: noqa
from .test_settings import * # noqa
# These endpoints will throw an error on the django debug panel
# URL prefixes for which the django debug panel raises errors; the toolbar
# is suppressed on these endpoints.
EXCLUDED_DEBUG_URLS = [
    "/content/storage",
]


def custom_show_toolbar(request):
    """Show the debug toolbar unless the request targets an excluded URL."""
    for prefix in EXCLUDED_DEBUG_URLS:
        if request.path.startswith(prefix):
            return False
    return True
# Enable Arabic in addition to the languages inherited from test_settings.
LANGUAGES += (('ar', ugettext('Arabic')),)  # noqa

try:
    import debug_panel  # noqa
except ImportError:
    # no debug panel, no use trying to add it to our middleware
    pass
else:
    # if debug_panel exists, add it to our INSTALLED_APPS
    INSTALLED_APPS += ('debug_panel', 'debug_toolbar', 'pympler')
    MIDDLEWARE_CLASSES += ('debug_panel.middleware.DebugPanelMiddleware',)
    # Gate toolbar rendering on the URL-based predicate defined above.
    DEBUG_TOOLBAR_CONFIG = {
        'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
    }
    DEBUG_TOOLBAR_PANELS = [
        'debug_toolbar.panels.versions.VersionsPanel',
        'debug_toolbar.panels.timer.TimerPanel',
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.sql.SQLPanel',
        'debug_toolbar.panels.staticfiles.StaticFilesPanel',
        'debug_toolbar.panels.templates.TemplatesPanel',
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ]

# Let django-storages create the dev bucket on first use.
AWS_AUTO_CREATE_BUCKET = True
Papa2k15/flask | scripts/flask-07-upgrade.py | 148 | 10659 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask-07-upgrade
~~~~~~~~~~~~~~~~
This command line script scans a whole application tree and attempts to
output an unified diff with all the changes that are necessary to easily
upgrade the application to 0.7 and to not yield deprecation warnings.
This will also attempt to find `after_request` functions that don't modify
the response and appear to be better suited for `teardown_request`.
This application is indeed an incredible hack, but because what it
attempts to accomplish is impossible to do statically it tries to support
the most common patterns at least. The diff it generates should be
hand reviewed and not applied blindly without making backups.
:copyright: (c) Copyright 2015 by Armin Ronacher.
:license: see LICENSE for more details.
"""
import re
import os
import inspect
import difflib
import posixpath
from optparse import OptionParser
try:
import ast
except ImportError:
ast = None
# How many bytes of a template file to inspect when classifying it.
TEMPLATE_LOOKAHEAD = 4096

# Matches common variable names bound to the Flask application object.
_app_re_part = r'((?:[a-zA-Z_][a-zA-Z0-9_]*app)|app|application)'
# Matches a single- or double-quoted Python string literal.
_string_re_part = r"('([^'\\]*(?:\\.[^'\\]*)*)'" \
                  r'|"([^"\\]*(?:\\.[^"\\]*)*)")'

# `from flask import ...` statements.
_from_import_re = re.compile(r'^\s*from flask import\s+')
# url_for('endpoint') and render_template('name') call sites.
_url_for_re = re.compile(r'\b(url_for\()(%s)' % _string_re_part)
_render_template_re = re.compile(r'\b(render_template\()(%s)' % _string_re_part)
# @obj.after_request / @obj.app_after_request decorator lines.
# NOTE(review): the trailing inline `(?m)` flag is an error on Python 3.11+;
# it only works here because this script targets Python 2.
_after_request_re = re.compile(r'((?:@\S+\.(?:app_)?))(after_request)(\b\s*$)(?m)')
# `mod = Module(__name__[, 'name'])` constructor calls.
_module_constructor_re = re.compile(r'([a-zA-Z0-9_][a-zA-Z0-9_]*)\s*=\s*Module'
                                    r'\(__name__\s*(?:,\s*(?:name\s*=\s*)?(%s))?' %
                                    _string_re_part)
# app.error_handlers[code] subscript accesses.
_error_handler_re = re.compile(r'%s\.error_handlers\[\s*(\d+)\s*\]' % _app_re_part)
# @mod.route decorators, used to back-track which Module a view belongs to.
_mod_route_re = re.compile(r'@([a-zA-Z0-9_][a-zA-Z0-9_]*)\.route')
# Straight textual replacements from the Module era to Blueprints.
_blueprint_related = [
    (re.compile(r'request\.module'), 'request.blueprint'),
    (re.compile(r'register_module'), 'register_blueprint'),
    (re.compile(r'%s\.modules' % _app_re_part), '\\1.blueprints')
]
def make_diff(filename, old, new):
    """Print a unified diff between *old* and *new* for *filename*.

    The a/ and b/ path prefixes mimic git-style diff headers.  (Python 2
    print statement: this script predates Python 3.)
    """
    for line in difflib.unified_diff(old.splitlines(), new.splitlines(),
                                     posixpath.normpath(posixpath.join('a', filename)),
                                     posixpath.normpath(posixpath.join('b', filename)),
                                     lineterm=''):
        print line
def looks_like_teardown_function(node):
    """Heuristically decide whether an after_request handler is really a
    teardown handler.

    Returns the name of the response parameter if the function takes the
    response, returns it unchanged, and never otherwise references it;
    returns None when the function actually uses the response.

    NOTE(review): relies on Python 2 `ast` semantics where function
    arguments are `ast.Name` nodes carrying an `id` attribute.
    """
    # A candidate must contain exactly one `return` statement anywhere.
    returns = [x for x in ast.walk(node) if isinstance(x, ast.Return)]
    if len(returns) != 1:
        return
    return_def = returns[0]
    resp_name = node.args.args[0]
    # ... and that return must hand back the response parameter verbatim.
    if not isinstance(return_def.value, ast.Name) or \
       return_def.value.id != resp_name.id:
        return
    # ... and the response must not be referenced anywhere else in the body.
    for body_node in node.body:
        for child in ast.walk(body_node):
            if isinstance(child, ast.Name) and \
               child.id == resp_name.id:
                if child is not return_def.value:
                    return
    return resp_name.id
def fix_url_for(contents, module_declarations=None):
    """Rewrite endpoint strings in url_for() calls for blueprint semantics.

    With *module_declarations* (a mapping of module variable name to module
    name) only calls inside views routed on a known module are rewritten;
    with None (template mode) every url_for() call is rewritten.
    """
    if module_declarations is None:
        skip_module_test = True
    else:
        skip_module_test = False
        mapping = dict(module_declarations)
    annotated_lines = []
    def make_line_annotations():
        # Lazily build (cumulative end offset, line) pairs so character
        # offsets from the regex can be mapped back to source lines.
        if not annotated_lines:
            last_index = 0
            for line in contents.splitlines(True):
                last_index += len(line)
                annotated_lines.append((last_index, line))
    def backtrack_module_name(call_start):
        # Walk backwards from the call site to the nearest @mod.route
        # decorator to learn which module the enclosing view belongs to.
        make_line_annotations()
        for idx, (line_end, line) in enumerate(annotated_lines):
            if line_end > call_start:
                for _, line in reversed(annotated_lines[:idx]):
                    match = _mod_route_re.search(line)
                    if match is not None:
                        shortname = match.group(1)
                        return mapping.get(shortname)
    def handle_match(match):
        if not skip_module_test:
            modname = backtrack_module_name(match.start())
            if modname is None:
                return match.group(0)
        prefix = match.group(1)
        endpoint = ast.literal_eval(match.group(2))
        # Module-relative endpoints become blueprint-relative endpoints;
        # fully qualified ones (containing a dot) are left untouched.
        if endpoint.startswith('.'):
            endpoint = endpoint[1:]
        elif '.' not in endpoint:
            endpoint = '.' + endpoint
        else:
            return match.group(0)
        return prefix + repr(endpoint)
    return _url_for_re.sub(handle_match, contents)
def fix_teardown_funcs(contents):
    """Convert after_request handlers that ignore the response into
    teardown_request handlers (decorator renamed, return statement dropped,
    response parameter renamed to `exception`)."""
    def is_return_line(line):
        args = line.strip().split()
        return args and args[0] == 'return'
    def fix_single(match, lines, lineno):
        # Only rewrite when the decorator is directly followed by the def.
        if not lines[lineno + 1].startswith('def'):
            return
        block_lines = inspect.getblock(lines[lineno + 1:])
        func_code = ''.join(block_lines)
        # Indented (method-level) bodies need a dummy wrapper to parse.
        if func_code[0].isspace():
            node = ast.parse('if 1:\n' + func_code).body[0].body
        else:
            node = ast.parse(func_code).body[0]
        response_param_name = looks_like_teardown_function(node)
        if response_param_name is None:
            return
        before = lines[:lineno]
        decorator = [match.group(1) +
                     match.group(2).replace('after_', 'teardown_') +
                     match.group(3)]
        # Teardown handlers receive the exception instead of the response
        # and must not return it.
        body = [line.replace(response_param_name, 'exception')
                for line in block_lines if
                not is_return_line(line)]
        after = lines[lineno + len(block_lines) + 1:]
        return before + decorator + body + after
    content_lines = contents.splitlines(True)
    # Restart the scan after every rewrite since line indices shift.
    # NOTE(review): `found_one` is assigned but never used.
    while 1:
        found_one = False
        for idx, line in enumerate(content_lines):
            match = _after_request_re.match(line)
            if match is None:
                continue
            new_content_lines = fix_single(match, content_lines, idx)
            if new_content_lines is not None:
                content_lines = new_content_lines
                break
        else:
            break
    return ''.join(content_lines)
def get_module_autoname(filename):
    """Derive the implicit name Flask's old Module() gave a file.

    A package ``__init__.py`` is named after its directory; any other file
    is named after its basename without the extension.
    """
    directory, basename = os.path.split(filename)
    if basename == '__init__.py':
        return os.path.basename(directory)
    return os.path.splitext(basename)[0]
def rewrite_from_imports(prefix, fromlist, lineiter):
    """Collect one (possibly multi-line) `from flask import ...` statement
    and rename Module to Blueprint inside it.

    *fromlist* is the text after the import keyword on the first line;
    continuation lines (parenthesized or backslash-continued) are pulled
    from *lineiter*.
    """
    collected = [prefix, fromlist]
    if fromlist[0] == '(' and fromlist[-1] != ')':
        # Parenthesized import list spanning several lines.
        for extra in lineiter:
            collected.append(extra)
            if extra.rstrip().endswith(')'):
                break
    elif fromlist[-1] == '\\':
        # Backslash-continued import list.
        for extra in lineiter:
            collected.append(extra)
            if extra.rstrip().endswith('\\'):
                break
    return ''.join(collected).replace('Module', 'Blueprint')
def rewrite_blueprint_imports(contents):
    """Rename Module to Blueprint in all `from flask import ...` statements,
    including parenthesized and backslash-continued import lists."""
    rewritten = []
    lines = iter(contents.splitlines(True))
    for current in lines:
        match = _from_import_re.search(current)
        if match is None:
            rewritten.append(current)
        else:
            # Hand continuation-line consumption to the import rewriter.
            rewritten.append(rewrite_from_imports(match.group(),
                                                  current[match.end():],
                                                  lines))
    return ''.join(rewritten)
def rewrite_for_blueprints(contents, filename):
    """Rewrite Module(...) constructors into Blueprint(...) constructors.

    Returns (new_contents, mapping of variable name -> module name).
    """
    modules_declared = []
    def handle_match(match):
        target = match.group(1)
        name_param = match.group(2)
        # Module() inferred its name from __name__ when not given explicitly.
        if name_param is None:
            modname = get_module_autoname(filename)
        else:
            modname = ast.literal_eval(name_param)
        modules_declared.append((target, modname))
        return '%s = %s' % (target, 'Blueprint(%r, __name__' % modname)
    new_contents = _module_constructor_re.sub(handle_match, contents)
    # Only touch imports and related APIs in files that declared modules.
    if modules_declared:
        new_contents = rewrite_blueprint_imports(new_contents)
        for pattern, replacement in _blueprint_related:
            new_contents = pattern.sub(replacement, new_contents)
    return new_contents, dict(modules_declared)
def upgrade_python_file(filename, contents, teardown):
    """Run all source-level rewrites on one Python file and print the diff."""
    new_contents = contents
    if teardown:
        # Optionally convert eligible after_request handlers first.
        new_contents = fix_teardown_funcs(new_contents)
    new_contents, modules = rewrite_for_blueprints(new_contents, filename)
    new_contents = fix_url_for(new_contents, modules)
    # app.error_handlers[code] moved to app.error_handler_spec[None][code].
    new_contents = _error_handler_re.sub('\\1.error_handler_spec[None][\\2]',
                                         new_contents)
    make_diff(filename, contents, new_contents)
def upgrade_template_file(filename, contents):
    """Rewrite url_for() endpoint strings in one template and print the diff."""
    new_contents = fix_url_for(contents, None)
    make_diff(filename, contents, new_contents)
def walk_path(path):
    """Yield (filename, type) pairs for upgradable files under *path*.

    *type* is 'python' for .py files and 'template' for files that look
    like Jinja templates.  Hidden directories, this script itself, and
    diff/patch files are skipped.
    """
    this_file = os.path.realpath(__file__).rstrip('c')
    for dirpath, dirnames, filenames in os.walk(path):
        dirnames[:] = [x for x in dirnames if not x.startswith('.')]
        for filename in filenames:
            filename = os.path.join(dirpath, filename)
            if os.path.realpath(filename) == this_file:
                continue
            if filename.endswith('.py'):
                yield filename, 'python'
            # skip files that are diffs. These might be false positives
            # when run multiple times.
            elif not filename.endswith(('.diff', '.patch', '.udiff')):
                with open(filename) as f:
                    contents = f.read(TEMPLATE_LOOKAHEAD)
                # Bug fix: the original condition
                #   `'{% for' or '{% if' or '{{ url_for' in contents`
                # was always truthy (a non-empty string literal), so every
                # non-diff file was classified as a template.
                if any(marker in contents
                       for marker in ('{% for', '{% if', '{{ url_for')):
                    yield filename, 'template'
def scan_path(path='.', teardown=True):
    """Upgrade every Python file and template found under *path*.

    Args:
        path: Directory tree to scan.  Bug fix: the previous default of
            ``None`` crashed inside ``os.walk``; the current directory is
            now the default (explicit callers are unaffected).
        teardown: Whether to attempt after_request -> teardown_request
            conversion.
    """
    for filename, type in walk_path(path):
        with open(filename) as f:
            contents = f.read()
        if type == 'python':
            upgrade_python_file(filename, contents, teardown)
        elif type == 'template':
            upgrade_template_file(filename, contents)
def main():
    """Entrypoint: parse command line options and scan the given paths."""
    parser = OptionParser(usage='%prog [options] [paths]')
    parser.add_option('-T', '--no-teardown-detection', dest='no_teardown',
                      action='store_true', help='Do not attempt to '
                      'detect teardown function rewrites.')
    parser.add_option('-b', '--bundled-templates', dest='bundled_tmpl',
                      action='store_true', help='Indicate to the system '
                      'that templates are bundled with modules. Default '
                      'is auto detect.')
    options, args = parser.parse_args()
    # Default to scanning the current directory.
    if not args:
        args = ['.']
    # The rewrites rely on the `ast` module (Python >= 2.6).
    if ast is None:
        parser.error('Python 2.6 or later is required to run the upgrade script.')
    # NOTE(review): options.bundled_tmpl is parsed but never used here.
    for path in args:
        scan_path(path, teardown=not options.no_teardown)

if __name__ == '__main__':
    main()
| bsd-3-clause |
FireballDWF/cloud-custodian | tests/test_elb.py | 5 | 21540 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.elb import ELB, SetSslListenerPolicy
class ELBTagTest(BaseTest):
    """Tests for tag actions and tag filters on classic ELBs."""

    def _tag_map(self, client, name):
        # Fetch the tags of one ELB as a {key: value} dict.
        described = client.describe_tags(LoadBalancerNames=[name])
        tags = described["TagDescriptions"][0]["Tags"]
        return {t["Key"]: t["Value"] for t in tags}

    def test_elb_tag_and_remove(self):
        self.patch(ELB, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_elb_tag_and_remove")
        client = session_factory().client("elb")

        tag_policy = self.load_policy(
            {
                "name": "elb-tag",
                "resource": "elb",
                "filters": [{"LoadBalancerName": "CloudCustodian"}],
                "actions": [{"type": "tag", "key": "xyz", "value": "abdef"}],
            },
            config={"account_id": "644160558196"},
            session_factory=session_factory,
        )
        resources = tag_policy.run()
        self.assertEqual(len(resources), 1)
        self.assertTrue("xyz" in self._tag_map(client, "CloudCustodian"))

        untag_policy = self.load_policy(
            {
                "name": "elb-tag",
                "resource": "elb",
                "filters": [{"LoadBalancerName": "CloudCustodian"}],
                "actions": [{"type": "remove-tag", "tags": ["xyz"]}],
            },
            config={"account_id": "644160558196"},
            session_factory=session_factory,
        )
        resources = untag_policy.run()
        self.assertEqual(len(resources), 1)
        self.assertFalse("xyz" in self._tag_map(client, "CloudCustodian"))

    def test_elb_tags(self):
        self.patch(ELB, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_elb_tags")
        policy = self.load_policy(
            {
                "name": "elb-mark",
                "resource": "elb",
                "filters": [{"tag:Platform": "ubuntu"}],
            },
            config={"account_id": "644160558196"},
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["LoadBalancerName"], "CloudCustodian")

    def test_mark_and_match(self):
        session_factory = self.replay_flight_data("test_elb_mark_and_match")
        mark_policy = self.load_policy(
            {
                "name": "elb-mark",
                "resource": "elb",
                "filters": [{"LoadBalancerName": "CloudCustodian"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "op": "delete",
                        "tag": "custodian_next",
                        "days": 1,
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = mark_policy.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client("elb")
        self.assertTrue("custodian_next" in self._tag_map(client, "CloudCustodian"))

        match_policy = self.load_policy(
            {
                "name": "elb-mark-filter",
                "resource": "elb",
                "filters": [
                    {"type": "marked-for-op", "tag": "custodian_next", "op": "delete"}
                ],
            },
            config={"account_id": "644160558196"},
            session_factory=session_factory,
        )
        self.assertEqual(len(match_policy.run()), 1)
class ELBInstance(BaseTest):
    """Tests for the ELB ``instance`` filter."""

    def test_instance_filter(self):
        # Match ELBs whose registered instances run a specific AMI.
        factory = self.replay_flight_data("test_elb_instance_filter")
        policy = self.load_policy(
            {
                "name": "elb-instance",
                "resource": "elb",
                "filters": [
                    {"type": "instance", "key": "ImageId", "value": "ami-40d28157"}
                ],
            },
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["LoadBalancerName"], "balanced")
class HealthCheckProtocolMismatchTest(BaseTest):
    """Tests for the healthcheck-protocol-mismatch filter."""

    def test_healthcheck_protocol_mismatch(self):
        factory = self.replay_flight_data("test_healthcheck_protocol_mismatch")
        policy = self.load_policy(
            {
                "name": "healthcheck-protocol-mismatch",
                "resource": "elb",
                "filters": [{"type": "healthcheck-protocol-mismatch"}],
            },
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 3)
        # make sure we matched the right load balancers
        matched_names = {elb["LoadBalancerName"] for elb in matched}
        expected_names = {
            "test-elb-no-listeners",
            "test-elb-protocol-matches",
            "test-elb-multiple-listeners",
        }
        self.assertEqual(matched_names, expected_names)
class SSLPolicyTest(BaseTest):
    """Tests for the ssl-policy filter and set-ssl-listener-policy action."""

    def test_ssl_ciphers(self):
        factory = self.replay_flight_data("test_ssl_ciphers")
        policy = self.load_policy(
            {
                "name": "test-ssl-ciphers",
                "resource": "elb",
                "filters": [{"type": "ssl-policy", "blacklist": ["Protocol-SSLv2"]}],
            },
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["LoadBalancerName"], "test-elb-invalid-policy")

    def test_set_ssl_listener_policy_fail(self):
        factory = self.replay_flight_data("test_set_ssl_listener")
        # Force the per-ELB worker to blow up so the action surfaces errors.
        self.patch(
            SetSslListenerPolicy, "process_elb", lambda self, client, elb: elb.xyz)
        policy = self.load_policy(
            {
                "name": "test-set-ssl-listerner",
                "resource": "elb",
                "filters": [{"LoadBalancerName": "test-elb"}],
                "actions": [
                    {
                        "type": "set-ssl-listener-policy",
                        "name": "testpolicy",
                        "attributes": ["AES128-SHA256", "Protocol-TLSv1"],
                    }
                ],
            },
            session_factory=factory,
        )
        self.assertRaises(AttributeError, policy.run)

    def test_set_ssl_listener_policy(self):
        factory = self.replay_flight_data("test_set_ssl_listener")
        client = factory().client("elb")
        policy = self.load_policy(
            {
                "name": "test-set-ssl-listerner",
                "resource": "elb",
                "filters": [
                    {
                        "type": "ssl-policy",
                        "whitelist": ["AES128-SHA256", "Protocol-TLSv1"],
                    },
                    {
                        "type": "value",
                        "key": "LoadBalancerName",
                        "value": "test-elb",
                        "op": "eq",
                    },
                ],
                "actions": [
                    {
                        "type": "set-ssl-listener-policy",
                        "name": "testpolicy",
                        "attributes": ["AES128-SHA256", "Protocol-TLSv1"],
                    }
                ],
            },
            session_factory=factory,
        )
        policy.run()
        # Keep the two describe calls in the original order for flight-data
        # replay, then verify the listener policies and enabled ciphers.
        elb_desc = client.describe_load_balancers(LoadBalancerNames=["test-elb"])
        cipher_desc = client.describe_load_balancer_policies(
            LoadBalancerName="test-elb", PolicyNames=["testpolicy-1493768308000"]
        )
        listener_policies = elb_desc["LoadBalancerDescriptions"][0][
            "ListenerDescriptions"][0]["PolicyNames"]
        attrs = [
            {str(k): str(v) for k, v in x.items()}
            for x in cipher_desc["PolicyDescriptions"][0][
                "PolicyAttributeDescriptions"]
        ]
        enabled = [
            a["AttributeName"] for a in attrs if a["AttributeValue"] == "true"
        ]
        self.assertEqual(
            listener_policies,
            [
                "AWSConsole-LBCookieStickinessPolicy-test-elb-1493748038333",
                "testpolicy-1493768308000",
            ],
        )
        self.assertEqual(enabled, ["Protocol-TLSv1", "AES128-SHA256"])

    def test_ssl_matching(self):
        factory = self.replay_flight_data("test_ssl_ciphers")
        policy = self.load_policy(
            {
                "name": "test-ssl-matching",
                "resource": "elb",
                "filters": [
                    {
                        "type": "ssl-policy",
                        "matching": "^Protocol-",
                        "whitelist": [
                            "Protocol-TLSv1", "Protocol-TLSv1.1", "Protocol-TLSv1.2"
                        ],
                    }
                ],
            },
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["LoadBalancerName"], "test-elb-invalid-policy")

    def test_filter_validation_no_blacklist(self):
        # An ssl-policy filter without whitelist/blacklist must be rejected.
        self.assertRaises(
            PolicyValidationError,
            self.load_policy,
            {
                "name": "test-ssl-ciphers",
                "resource": "elb",
                "filters": [{"type": "ssl-policy"}],
            },
            session_factory=None,
            validate=False,
        )

    def test_filter_validation_blacklist_not_iterable(self):
        # A scalar blacklist value must be rejected at validation time.
        self.assertRaises(
            PolicyValidationError,
            self.load_policy,
            {
                "name": "test-ssl-ciphers",
                "resource": "elb",
                "filters": [{"type": "ssl-policy", "blacklist": "single-value"}],
            },
            session_factory=None,
            validate=False,
        )
class TestDefaultVpc(BaseTest):
    """Tests for the default-vpc filter on ELBs."""

    def test_elb_default_vpc(self):
        factory = self.replay_flight_data("test_elb_default_vpc")
        policy = self.load_policy(
            {
                "name": "elb-default-filters",
                "resource": "elb",
                "filters": [{"type": "default-vpc"}],
            },
            config={"region": "us-west-2"},
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["LoadBalancerName"], "test-load-balancer")
class TestModifyVpcSecurityGroupsAction(BaseTest):
    """Tests for the modify-security-groups action on classic ELBs."""

    @staticmethod
    def _default_sg_id(client):
        # Resolve the id of the VPC's 'default' security group.
        groups = client.describe_security_groups(GroupNames=["default"])
        return groups["SecurityGroups"][0]["GroupId"]

    def test_elb_remove_security_groups(self):
        # Test conditions:
        # - running ELB in default VPC
        # - security group named TEST-PROD-ONLY-SG exists in VPC and is
        #   attached to the test ELB
        factory = self.replay_flight_data("test_elb_remove_security_groups")
        default_sg_id = self._default_sg_id(factory().client("ec2"))
        policy = self.load_policy(
            {
                "name": "elb-modify-security-groups-filter",
                "resource": "elb",
                "filters": [
                    {
                        "type": "security-group",
                        "key": "GroupName",
                        "value": "(.*PROD-ONLY.*)",
                        "op": "regex",
                    }
                ],
                "actions": [
                    {
                        "type": "modify-security-groups",
                        "remove": "matched",
                        "isolation-group": default_sg_id,
                    }
                ],
            },
            session_factory=factory,
        )
        first_run = policy.run()
        second_run = policy.run()
        self.assertEqual(len(first_run), 1)
        self.assertEqual(first_run[0]["LoadBalancerName"], "test-load-balancer")
        # The second pass matches nothing: the offending group was removed.
        self.assertEqual(len(second_run), 0)

    def test_elb_add_security_group(self):
        # Test conditions:
        # - one running ELB with only the 'default' VPC security group
        # - security group TEST-PROD-ONLY-SG exists but is not attached
        factory = self.replay_flight_data("test_elb_add_security_group")
        policy = self.load_policy(
            {
                "name": "add-sg-to-prod-elb",
                "resource": "elb",
                "filters": [
                    {"type": "security-group", "key": "GroupName", "value": "default"},
                    {
                        "type": "value",
                        "key": "LoadBalancerName",
                        "value": "test-load-balancer",
                    },
                ],
                "actions": [{"type": "modify-security-groups", "add": "sg-411b413c"}],
            },
            session_factory=factory,
        )
        before = policy.run()
        self.assertEqual(len(before[0]["SecurityGroups"]), 1)
        policy.validate()
        after = policy.run()
        self.assertEqual(len(after[0]["SecurityGroups"]), 2)

    def test_elb_add_security_groups(self):
        # Test conditions:
        # - one running ELB with only the 'default' VPC security group
        # - TEST-PROD-ONLY-SG, TEST-SG1 and TEST-SG2 exist but are unattached
        factory = self.replay_flight_data("test_elb_add_security_groups")
        policy = self.load_policy(
            {
                "name": "add-sgs-to-prod-elb",
                "resource": "elb",
                "filters": [
                    {"type": "security-group", "key": "GroupName", "value": "default"},
                    {
                        "type": "value",
                        "key": "LoadBalancerName",
                        "value": "test-load-balancer",
                    },
                ],
                "actions": [
                    {
                        "type": "modify-security-groups",
                        "add": ["sg-411b413c", "sg-8a4b64f7", "sg-5d4a6520"],
                    }
                ],
            },
            session_factory=factory,
        )
        before = policy.run()
        self.assertEqual(len(before[0]["SecurityGroups"]), 1)
        policy.validate()
        after = policy.run()
        self.assertEqual(len(after[0]["SecurityGroups"]), 4)

    def test_elb_remove_all_security_groups(self):
        # Test conditions:
        # - one running ELB with the 'default' and 'TEST-PROD-ONLY-SG'
        #   security groups attached
        factory = self.replay_flight_data("test_elb_remove_all_security_groups")
        default_sg_id = self._default_sg_id(factory().client("ec2"))
        policy = self.load_policy(
            {
                "name": "add-sg-to-prod-elb",
                "resource": "elb",
                "filters": [
                    {
                        "type": "value",
                        "key": "LoadBalancerName",
                        "value": "test-load-balancer",
                    }
                ],
                "actions": [
                    {
                        "type": "modify-security-groups",
                        "remove": "all",
                        "isolation-group": default_sg_id,
                    }
                ],
            },
            session_factory=factory,
        )
        before = policy.run()
        self.assertEqual(len(before[0]["SecurityGroups"]), 2)
        policy.validate()
        after = policy.run()
        self.assertEqual(len(after[0]["SecurityGroups"]), 1)
        # Only the isolation group remains attached to the ELB.
        self.assertEqual(after[0]["SecurityGroups"][0], default_sg_id)
class TestElbLogging(BaseTest):
    """Replay-based tests for enabling and disabling ELB S3 access logs."""
    def test_enable_s3_logging(self):
        """enable-s3-logging must set bucket, prefix, interval and Enabled."""
        factory = self.replay_flight_data("test_elb_enable_s3_logging")
        p = self.load_policy(
            {
                "name": "test-enable-s3-logging",
                "resource": "elb",
                "filters": [
                    {"type": "value", "key": "LoadBalancerName", "value": "elb1"}
                ],
                "actions": [
                    {
                        "type": "enable-s3-logging",
                        "bucket": "elbv2logtest",
                        "prefix": "elblogs",
                        "emit_interval": 5,
                    }
                ],
            },
            session_factory=factory,
        )
        matched = p.run()
        elb_api = factory().client("elb")
        # Refresh every matched ELB with its live attribute set.
        for balancer in matched:
            described = elb_api.describe_load_balancer_attributes(
                LoadBalancerName=balancer["LoadBalancerName"]
            )
            balancer["Attributes"] = described["LoadBalancerAttributes"]
        access_log = matched[0]["Attributes"]["AccessLog"]
        self.assertEqual(access_log["EmitInterval"], 5)
        self.assertEqual(access_log["S3BucketName"], "elbv2logtest")
        self.assertEqual(access_log["S3BucketPrefix"], "elblogs")
        self.assertTrue(access_log["Enabled"])
    def test_disable_s3_logging(self):
        """disable-s3-logging must clear the AccessLog Enabled flag."""
        factory = self.replay_flight_data("test_elb_disable_s3_logging")
        p = self.load_policy(
            {
                "name": "test-disable-s3-logging",
                "resource": "elb",
                "filters": [
                    {"type": "value", "key": "LoadBalancerName", "value": "elb1"}
                ],
                "actions": [{"type": "disable-s3-logging"}],
            },
            session_factory=factory,
        )
        matched = p.run()
        elb_api = factory().client("elb")
        for balancer in matched:
            described = elb_api.describe_load_balancer_attributes(
                LoadBalancerName=balancer["LoadBalancerName"]
            )
            balancer["Attributes"] = described["LoadBalancerAttributes"]
        self.assertFalse(matched[0]["Attributes"]["AccessLog"]["Enabled"])
class TestElbIsLoggingFilter(BaseTest):
    """Exercises the ``is-logging`` ELB filter, replicating:

        - name: elb-is-logging-to-bucket-test
          resource: elb
          filters:
            - type: is-logging
              bucket: elbv2logtest
    """
    def test_is_logging_to_bucket(self):
        factory = self.replay_flight_data("test_elb_is_logging_filter")
        p = self.load_policy(
            {
                "name": "elb-is-logging-to-bucket-test",
                "resource": "elb",
                "filters": [{"type": "is-logging", "bucket": "elbv2logtest"}],
            },
            session_factory=factory,
        )
        found = p.run()
        self.assertGreater(
            len(found), 0, "Test should find elbs logging to elbv2logtest"
        )
class TestElbIsNotLoggingFilter(BaseTest):
    """Exercises the ``is-not-logging`` ELB filter, replicating:

        - name: elb-is-not-logging-to-bucket-test
          resource: elb
          filters:
            - type: is-not-logging
              bucket: otherbucket
    """
    # NOTE(review): method name kept for compatibility with test selection;
    # it actually exercises the is-not-logging filter.
    def test_is_logging_to_bucket(self):
        factory = self.replay_flight_data("test_elb_is_logging_filter")
        p = self.load_policy(
            {
                "name": "elb-is-not-logging-to-bucket-test",
                "resource": "elb",
                "filters": [{"type": "is-not-logging", "bucket": "otherbucket"}],
            },
            session_factory=factory,
        )
        found = p.run()
        self.assertGreater(
            len(found), 0, "Should find elb not logging to otherbucket"
        )
| apache-2.0 |
Nethertech/Intellectus | cocos2d/plugin/tools/toolsForGame/steps.py | 255 | 2302 | import sys, string, os
from Tkinter import *
# define class step
class step:
    """Abstract wizard page: subclasses build a Tk frame and validate input."""
    # Tk Frame holding this page's widgets; populated by initStep().
    stepFrame = None
    def initStep(self, root):
        """Build the page's widgets under *root*; base class does nothing."""
        return None
    def checkStep(self):
        """Validate user input; return an error string, or None when valid."""
        return None
# define class step1
class step1(step):
    """Wizard page 1: ask for the game's Android project path."""
    # Tk Entry widget holding the user-supplied project path.
    step_entry = None
    def initStep(self, root):
        """Build the prompt labels and the path entry field under *root*."""
        self.stepFrame = Frame(root)
        step_tip = Label(self.stepFrame, text="Input the android project path of your game:")
        step_tip.pack(anchor='nw', padx=30)
        # Fixed typo in the user-visible hint ("Pleasd" -> "Please").
        step_tip2 = Label(self.stepFrame, text="(Please avoid using spaces in your project path)")
        step_tip2.pack(anchor='nw', padx=30)
        self.step_entry = Entry(self.stepFrame)
        self.step_entry.pack(anchor='nw', fill=X, padx=30)
    def checkStep(self):
        """Return an error message unless the entered path is an Android
        project directory (must contain AndroidManifest.xml)."""
        projPath = self.step_entry.get()
        # Short-circuit so the filesystem is only touched for a non-empty
        # path; os.path.join is portable where '+ "/..."' was not.
        if not projPath or not os.path.exists(projPath) \
                or not os.path.exists(os.path.join(projPath, 'AndroidManifest.xml')):
            return 'The project path is wrong'
        return None
    def getPath(self):
        """Return the raw project path currently typed by the user."""
        return self.step_entry.get()
# define class step2
class step2(step):
    """Wizard page 2: let the user pick which plugins to integrate."""
    # Kept as class attributes for backward compatibility; the real storage
    # is per-instance (see __init__) so separate wizards don't share state.
    checkBtns = []
    checkValues = []
    def __init__(self):
        # Bug fix: the original appended to the class-level lists, so every
        # step2 instance shared (and accumulated) the same checkbuttons.
        self.checkBtns = []
        self.checkValues = []
    def initStep(self, root, pluginList):
        """Build one checkbutton per entry of *pluginList* under *root*."""
        self.stepFrame = Frame(root)
        step_tip = Label(self.stepFrame, text="Select plugins you needed:")
        step_tip.pack(anchor='nw', padx=30)
        for plugin in pluginList:
            var = StringVar()
            self.checkValues.append(var)
            # onvalue is the plugin name itself and offvalue is '', so
            # var.get() is empty exactly when the box is unchecked.
            btn = Checkbutton(self.stepFrame, text=plugin, variable=var, onvalue=plugin, offvalue='')
            btn.pack(anchor='nw', padx=50)
            self.checkBtns.append(btn)
    def checkStep(self):
        """Return an error message unless at least one plugin is checked."""
        for var in self.checkValues:
            if var.get():
                return None
        return 'At least select one plugin'
    def getSelectedPlugins(self):
        """Return the list of plugin names whose checkbutton is checked."""
        return [var.get() for var in self.checkValues if var.get()]
| mit |
rupran/ansible | lib/ansible/modules/network/ldap_entry.py | 28 | 9767 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Peter Sagerson <psagers@ignorare.net>
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ldap_entry
short_description: Add or remove LDAP entries.
description:
- Add or remove LDAP entries. This module only asserts the existence or
non-existence of an LDAP entry, not its attributes. To assert the
attribute values of an entry, see M(ldap_attr).
notes:
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
a simple bind to access your server, pass the credentials in I(bind_dn)
and I(bind_pw).
version_added: '2.3'
author:
- Jiri Tyr (@jtyr)
requirements:
- python-ldap
options:
bind_dn:
required: false
default: null
description:
- A DN to bind with. If this is omitted, we'll try a SASL bind with
the EXTERNAL mechanism. If this is blank, we'll use an anonymous
bind.
bind_pw:
required: false
default: null
description:
- The password to use with I(bind_dn).
dn:
required: true
description:
- The DN of the entry to add or remove.
attributes:
required: false
default: null
description:
- If I(state=present), attributes necessary to create an entry. Existing
entries are never modified. To assert specific attribute values on an
existing entry, use M(ldap_attr) module instead.
objectClass:
required: false
default: null
description:
- If I(state=present), value or list of values to use when creating
the entry. It can either be a string or an actual list of
strings.
params:
required: false
default: null
description:
- List of options which allows to overwrite any of the task or the
I(attributes) options. To remove an option, set the value of the option
to C(null).
server_uri:
required: false
default: ldapi:///
description:
- A URI to the LDAP server. The default value lets the underlying
LDAP client library look for a UNIX domain socket in its default
location.
start_tls:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If true, we'll use the START_TLS LDAP extension.
state:
required: false
choices: [present, absent]
default: present
description:
- The target state of the entry.
"""
EXAMPLES = """
- name: Make sure we have a parent entry for users
ldap_entry:
dn: ou=users,dc=example,dc=com
objectClass: organizationalUnit
- name: Make sure we have an admin user
ldap_entry:
dn: cn=admin,dc=example,dc=com
objectClass:
- simpleSecurityObject
- organizationalRole
attributes:
description: An LDAP administrator
userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
#
# The same as in the previous example but with the authentication details
# stored in the ldap_auth variable:
#
# ldap_auth:
# server_uri: ldap://localhost/
# bind_dn: cn=admin,dc=example,dc=com
# bind_pw: password
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
params: "{{ ldap_auth }}"
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import ldap
import ldap.modlist
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
class LdapEntry(object):
    """Wrapper around a python-ldap connection that asserts the existence or
    non-existence of a single DN.

    ``add()`` / ``delete()`` do not act immediately: they return a callable
    (or None when nothing needs doing), which lets the caller honor check
    mode and compute ``changed`` before executing.
    """
    def __init__(self, module):
        # Shortcuts
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.dn = self.module.params['dn']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.state = self.module.params['state']
        # Add the objectClass into the list of attributes
        self.module.params['attributes']['objectClass'] = (
            self.module.params['objectClass'])
        # Load attributes
        if self.state == 'present':
            self.attrs = self._load_attrs()
        # Establish connection
        self.connection = self._connect_to_ldap()
    def _load_attrs(self):
        """ Turn attribute's value to array. """
        attrs = {}
        for name, value in self.module.params['attributes'].items():
            if name not in attrs:
                attrs[name] = []
            if isinstance(value, list):
                # Lists are taken as-is; scalars are stringified and wrapped.
                attrs[name] = value
            else:
                attrs[name].append(str(value))
        return attrs
    def add(self):
        """ If self.dn does not exist, returns a callable that will add it. """
        def _add():
            # Closes over `modlist`, bound below before `action` is set.
            self.connection.add_s(self.dn, modlist)
        if not self._is_entry_present():
            modlist = ldap.modlist.addModlist(self.attrs)
            action = _add
        else:
            action = None
        return action
    def delete(self):
        """ If self.dn exists, returns a callable that will delete it. """
        def _delete():
            self.connection.delete_s(self.dn)
        if self._is_entry_present():
            action = _delete
        else:
            action = None
        return action
    def _is_entry_present(self):
        """Return True when self.dn exists (base-scope search succeeds)."""
        try:
            self.connection.search_s(self.dn, ldap.SCOPE_BASE)
        except ldap.NO_SUCH_OBJECT:
            is_present = False
        else:
            is_present = True
        return is_present
    def _connect_to_ldap(self):
        """Open, optionally TLS-upgrade, and bind an LDAP connection.

        Uses a simple bind when bind_dn is given, otherwise a SASL EXTERNAL
        bind (e.g. over a UNIX domain socket). Fails the module on error.
        """
        connection = ldap.initialize(self.server_uri)
        if self.start_tls:
            try:
                connection.start_tls_s()
            except ldap.LDAPError:
                e = get_exception()
                self.module.fail_json(msg="Cannot start TLS.", details=str(e))
        try:
            if self.bind_dn is not None:
                connection.simple_bind_s(self.bind_dn, self.bind_pw)
            else:
                connection.sasl_interactive_bind_s('', ldap.sasl.external())
        except ldap.LDAPError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot bind to the server.", details=str(e))
        return connection
def main():
    """Module entry point: validate parameters, then add or remove the DN."""
    module = AnsibleModule(
        argument_spec={
            'attributes': dict(default={}, type='dict'),
            'bind_dn': dict(),
            'bind_pw': dict(default='', no_log=True),
            'dn': dict(required=True),
            'objectClass': dict(type='raw'),
            'params': dict(type='dict'),
            'server_uri': dict(default='ldapi:///'),
            'start_tls': dict(default=False, type='bool'),
            'state': dict(default='present', choices=['present', 'absent']),
        },
        supports_check_mode=True,
    )
    if not HAS_LDAP:
        # Fixed typo in the user-facing message ("requried" -> "required").
        module.fail_json(
            msg="Missing required 'ldap' module (pip install python-ldap).")
    state = module.params['state']
    # Check if objectClass is present when needed
    if state == 'present' and module.params['objectClass'] is None:
        module.fail_json(msg="At least one objectClass must be provided.")
    # Check if objectClass is of the correct type
    if (
            module.params['objectClass'] is not None and not (
                isinstance(module.params['objectClass'], basestring) or
                isinstance(module.params['objectClass'], list))):
        module.fail_json(msg="objectClass must be either a string or a list.")
    # Update module parameters with user's parameters if defined: known
    # option names override task options, anything else becomes an attribute.
    if 'params' in module.params and isinstance(module.params['params'], dict):
        for key, val in module.params['params'].items():
            if key in module.argument_spec:
                module.params[key] = val
            else:
                module.params['attributes'][key] = val
        # Remove the params
        module.params.pop('params', None)
    # Instantiate the LdapEntry object.
    # Renamed the local from `ldap` to avoid shadowing the imported `ldap`
    # library within this function.
    ldap_entry = LdapEntry(module)
    # Get the action function
    if state == 'present':
        action = ldap_entry.add()
    elif state == 'absent':
        action = ldap_entry.delete()
    # Perform the action only outside of check mode; `changed` reflects
    # whether an action was needed either way.
    if action is not None and not module.check_mode:
        try:
            action()
        except Exception:
            e = get_exception()
            module.fail_json(msg="Entry action failed.", details=str(e))
    module.exit_json(changed=(action is not None))
if __name__ == '__main__':
    main()
| gpl-3.0 |
aldian/tensorflow | tensorflow/python/framework/tensor_shape_div_test.py | 178 | 1495 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that old style division works for Dimension."""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import six
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionDivTest(test_util.TensorFlowTestCase):
  def testDivSucceeds(self):
    """`__div__` must match `__floordiv__` without the division import."""
    if six.PY2:  # Old division exists only in Python 2
      dims = [tensor_shape.Dimension(n) for n in (3, 7, 11, None)]
      for numerator in dims:
        for denominator in dims:
          quotient = numerator / denominator
          floored = numerator // denominator
          self.assertEqual(quotient.value, floored.value)
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
jochemdu/nyanchain | Abe/deserialize.py | 6 | 12559 | #
#
#
from BCDataStream import *
from enumeration import Enumeration
from base58 import public_key_to_bc_address, hash_160_to_bc_address
import logging
import socket
import time
from util import short_hex, long_hex
import struct
def parse_CAddress(vds):
    """Read a serialized CAddress (network address record) from *vds*."""
    addr = {}
    addr['nVersion'] = vds.read_int32()
    addr['nTime'] = vds.read_uint32()
    addr['nServices'] = vds.read_uint64()
    # 12 reserved bytes precede the 4-byte IPv4 address.
    addr['pchReserved'] = vds.read_bytes(12)
    addr['ip'] = socket.inet_ntoa(vds.read_bytes(4))
    addr['port'] = socket.htons(vds.read_uint16())
    return addr
def deserialize_CAddress(d):
    """Render a parsed CAddress dict as "ip:port (lastseen: <ctime>)"."""
    last_seen = time.ctime(d['nTime'])
    return "%s:%s (lastseen: %s)" % (d['ip'], d['port'], last_seen)
def parse_setting(setting, vds):
    """Decode one wallet "setting" record, dispatching on its name.

    Returns a display string for most settings, but a raw integer for the
    fee/processor-limit settings (the caller must tolerate both).
    """
    if setting[0] == "f": # flag (boolean) settings
        return str(vds.read_boolean())
    elif setting == "addrIncoming":
        return "" # bitcoin 0.4 purposely breaks addrIncoming setting in encrypted wallets.
    elif setting[0:4] == "addr": # CAddress
        d = parse_CAddress(vds)
        return deserialize_CAddress(d)
    elif setting == "nTransactionFee":
        return vds.read_int64()
    elif setting == "nLimitProcessors":
        return vds.read_int32()
    # Unknown settings are reported rather than raising.
    return 'unknown setting'
def parse_TxIn(vds):
    """Read one transaction input (outpoint, scriptSig, sequence) from *vds*."""
    txin = {}
    txin['prevout_hash'] = vds.read_bytes(32)
    txin['prevout_n'] = vds.read_uint32()
    # scriptSig is prefixed with a compact-size length.
    txin['scriptSig'] = vds.read_bytes(vds.read_compact_size())
    txin['sequence'] = vds.read_uint32()
    return txin
def deserialize_TxIn(d, transaction_index=None, owner_keys=None):
    """Render a parsed TxIn as one line of text.

    When *transaction_index* (a dict of previously parsed transactions keyed
    by hash) knows the referenced output, its value is shown as well.
    Note: *owner_keys* is accepted but unused here.
    """
    # An all-zero prevout hash marks a coinbase (coin-generating) input.
    if d['prevout_hash'] == "\x00"*32:
        result = "TxIn: COIN GENERATED"
        result += " coinbase:"+d['scriptSig'].encode('hex_codec')
    elif transaction_index is not None and d['prevout_hash'] in transaction_index:
        p = transaction_index[d['prevout_hash']]['txOut'][d['prevout_n']]
        # Value is stored in satoshis; divide by 1e8 for BTC display.
        result = "TxIn: value: %f"%(p['value']/1.0e8,)
        result += " prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")"
    else:
        # Hash bytes are reversed ([::-1]) for the conventional hex display.
        result = "TxIn: prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")"
    pk = extract_public_key(d['scriptSig'])
    result += " pubkey: "+pk
    result += " sig: "+decode_script(d['scriptSig'])
    # Only show the sequence when it differs from the default (final) value.
    if d['sequence'] < 0xffffffff: result += " sequence: "+hex(d['sequence'])
    return result
def parse_TxOut(vds):
    """Read one transaction output (value in satoshis, scriptPubKey)."""
    txout = {}
    txout['value'] = vds.read_int64()
    txout['scriptPubKey'] = vds.read_bytes(vds.read_compact_size())
    return txout
def deserialize_TxOut(d, owner_keys=None):
    """Render a parsed TxOut as one line of text.

    When *owner_keys* (a collection of the wallet's addresses) is given,
    an "Own: True/False" marker is appended.
    """
    # Value is stored in satoshis; divide by 1e8 for BTC display.
    result = "TxOut: value: %f"%(d['value']/1.0e8,)
    pk = extract_public_key(d['scriptPubKey'])
    result += " pubkey: "+pk
    result += " Script: "+decode_script(d['scriptPubKey'])
    if owner_keys is not None:
        if pk in owner_keys: result += " Own: True"
        else: result += " Own: False"
    return result
def parse_Transaction(vds):
    """Read a full transaction (version, inputs, outputs, lock time).

    The exact serialized bytes consumed are kept under '__data__' so callers
    can re-hash or re-emit the transaction verbatim.
    """
    d = {}
    start_pos = vds.read_cursor
    d['version'] = vds.read_int32()
    n_vin = vds.read_compact_size()
    d['txIn'] = []
    for i in xrange(n_vin):
        d['txIn'].append(parse_TxIn(vds))
    n_vout = vds.read_compact_size()
    d['txOut'] = []
    for i in xrange(n_vout):
        d['txOut'].append(parse_TxOut(vds))
    d['lockTime'] = vds.read_uint32()
    # Raw bytes of this transaction, sliced out of the stream's buffer.
    d['__data__'] = vds.input[start_pos:vds.read_cursor]
    return d
def deserialize_Transaction(d, transaction_index=None, owner_keys=None, print_raw_tx=False):
    """Render a parsed transaction as multi-line human-readable text.

    *print_raw_tx* additionally appends the raw transaction hex.
    """
    result = "%d tx in, %d out\n"%(len(d['txIn']), len(d['txOut']))
    for txIn in d['txIn']:
        result += deserialize_TxIn(txIn, transaction_index) + "\n"
    for txOut in d['txOut']:
        result += deserialize_TxOut(txOut, owner_keys) + "\n"
    if print_raw_tx == True:
        result += "Transaction hex value: " + d['__data__'].encode('hex') + "\n"
    return result
def parse_MerkleTx(vds):
    """Read a CMerkleTx: a transaction plus its containing-block link."""
    d = parse_Transaction(vds)
    d['hashBlock'] = vds.read_bytes(32)
    n_merkleBranch = vds.read_compact_size()
    # The merkle branch is stored as one flat blob of n 32-byte hashes.
    d['merkleBranch'] = vds.read_bytes(32*n_merkleBranch)
    d['nIndex'] = vds.read_int32()
    return d
def deserialize_MerkleTx(d, transaction_index=None, owner_keys=None):
    """Render a parsed CMerkleTx: block link summary, then the transaction."""
    tx = deserialize_Transaction(d, transaction_index, owner_keys)
    # Hash bytes are reversed ([::-1]) for the conventional hex display.
    result = "block: "+(d['hashBlock'][::-1]).encode('hex_codec')
    result += " %d hashes in merkle branch\n"%(len(d['merkleBranch'])/32,)
    return result+tx
def parse_WalletTx(vds):
    """Read a CWalletTx: a CMerkleTx plus wallet-only bookkeeping fields."""
    d = parse_MerkleTx(vds)
    n_vtxPrev = vds.read_compact_size()
    d['vtxPrev'] = []
    for i in xrange(n_vtxPrev):
        d['vtxPrev'].append(parse_MerkleTx(vds))
    # Arbitrary string key/value metadata the wallet attached to this tx.
    d['mapValue'] = {}
    n_mapValue = vds.read_compact_size()
    for i in xrange(n_mapValue):
        key = vds.read_string()
        value = vds.read_string()
        d['mapValue'][key] = value
    # Order form: list of (key, value) string pairs, order preserved.
    n_orderForm = vds.read_compact_size()
    d['orderForm'] = []
    for i in xrange(n_orderForm):
        first = vds.read_string()
        second = vds.read_string()
        d['orderForm'].append( (first, second) )
    d['fTimeReceivedIsTxTime'] = vds.read_uint32()
    d['timeReceived'] = vds.read_uint32()
    d['fromMe'] = vds.read_boolean()
    d['spent'] = vds.read_boolean()
    return d
def deserialize_WalletTx(d, transaction_index=None, owner_keys=None):
    """Render a parsed CWalletTx: the merkle tx plus wallet metadata."""
    result = deserialize_MerkleTx(d, transaction_index, owner_keys)
    result += "%d vtxPrev txns\n"%(len(d['vtxPrev']),)
    result += "mapValue:"+str(d['mapValue'])
    if len(d['orderForm']) > 0:
        result += "\n"+" orderForm:"+str(d['orderForm'])
    result += "\n"+"timeReceived:"+time.ctime(d['timeReceived'])
    result += " fromMe:"+str(d['fromMe'])+" spent:"+str(d['spent'])
    return result
# The CAuxPow (auxiliary proof of work) structure supports merged mining.
# A flag in the block version field indicates the structure's presence.
# As of 8/2011, the Original Bitcoin Client does not use it. CAuxPow
# originated in Namecoin; see
# https://github.com/vinced/namecoin/blob/mergedmine/doc/README_merged-mining.md.
def parse_AuxPow(vds):
    """Read a CAuxPow record (merged mining): the parent chain's coinbase
    CMerkleTx, the chain merkle branch, and the parent block header."""
    d = parse_MerkleTx(vds)
    n_chainMerkleBranch = vds.read_compact_size()
    # Flat blob of n 32-byte hashes, like the tx merkle branch above.
    d['chainMerkleBranch'] = vds.read_bytes(32*n_chainMerkleBranch)
    d['chainIndex'] = vds.read_int32()
    d['parentBlock'] = parse_BlockHeader(vds)
    return d
def parse_BlockHeader(vds):
    """Read a block header; the raw bytes are kept under '__header__'."""
    d = {}
    header_start = vds.read_cursor
    d['version'] = vds.read_int32()
    d['hashPrev'] = vds.read_bytes(32)
    d['hashMerkleRoot'] = vds.read_bytes(32)
    d['nTime'] = vds.read_uint32()
    d['nBits'] = vds.read_uint32()
    d['nNonce'] = vds.read_uint32()
    header_end = vds.read_cursor
    # Exact header bytes, so callers can re-hash them verbatim.
    d['__header__'] = vds.input[header_start:header_end]
    return d
def parse_Block(vds):
    """Read a full block: header followed by its transactions."""
    d = parse_BlockHeader(vds)
    d['transactions'] = []
    # Merged-mining support is disabled here; version bit 8 would signal an
    # auxpow record between the header and the transaction list.
    # if d['version'] & (1 << 8):
    # d['auxpow'] = parse_AuxPow(vds)
    nTransactions = vds.read_compact_size()
    for i in xrange(nTransactions):
        d['transactions'].append(parse_Transaction(vds))
    return d
def deserialize_Block(d, print_raw_tx=False):
    """Render a parsed block: header fields, then every transaction."""
    result = "Time: "+time.ctime(d['nTime'])+" Nonce: "+str(d['nNonce'])
    result += "\nnBits: 0x"+hex(d['nBits'])
    # Hashes are reversed ([::-1]) for the conventional hex display.
    result += "\nhashMerkleRoot: 0x"+d['hashMerkleRoot'][::-1].encode('hex_codec')
    result += "\nPrevious block: "+d['hashPrev'][::-1].encode('hex_codec')
    result += "\n%d transactions:\n"%len(d['transactions'])
    for t in d['transactions']:
        result += deserialize_Transaction(t, print_raw_tx=print_raw_tx)+"\n"
    result += "\nRaw block header: "+d['__header__'].encode('hex_codec')
    return result
def parse_BlockLocator(vds):
    """Read a block locator: a compact-size count of 32-byte block hashes."""
    d = { 'hashes' : [] }
    nHashes = vds.read_compact_size()
    for i in xrange(nHashes):
        d['hashes'].append(vds.read_bytes(32))
    return d
def deserialize_BlockLocator(d):
    """Render only the tip (first hash) of a block locator for display."""
    # Hash bytes are reversed ([::-1]) for the conventional hex display.
    result = "Block Locator top: "+d['hashes'][0][::-1].encode('hex_codec')
    return result
# Bitcoin script opcode table. Each entry pairs an "OP_*" mnemonic with its
# byte value; entries without an explicit (name, value) tuple continue
# sequentially from the previous value (behavior of the Enumeration helper).
opcodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
    "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
    "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
    "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
    "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
    "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
    "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
    "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
    "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
    "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    "OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
    ("OP_INVALIDOPCODE", 0xFF),
])
def script_GetOp(bytes):
    """Generator over a serialized script.

    Yields (opcode, data, cursor) triples, where *data* is the pushed bytes
    (None for non-push opcodes) and *cursor* is the position just past the
    consumed element. Truncated pushes yield "_INVALID_..." marker strings
    instead of raising. Note: the parameter name shadows the builtin.
    """
    i = 0
    while i < len(bytes):
        vch = None
        opcode = ord(bytes[i])
        i += 1
        if opcode <= opcodes.OP_PUSHDATA4:
            # Push opcodes: the push length is either the opcode value
            # itself, or an explicit 1/2/4-byte little-endian size.
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                if i + 1 > len(bytes):
                    # Size byte missing: emit a marker and stop scanning.
                    vch = "_INVALID_NULL"
                    i = len(bytes)
                else:
                    nSize = ord(bytes[i])
                    i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                if i + 2 > len(bytes):
                    vch = "_INVALID_NULL"
                    i = len(bytes)
                else:
                    (nSize,) = struct.unpack_from('<H', bytes, i)
                    i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                if i + 4 > len(bytes):
                    vch = "_INVALID_NULL"
                    i = len(bytes)
                else:
                    (nSize,) = struct.unpack_from('<I', bytes, i)
                    i += 4
            if i+nSize > len(bytes):
                # Payload truncated: emit the remainder tagged as invalid.
                vch = "_INVALID_"+bytes[i:]
                i = len(bytes)
            else:
                vch = bytes[i:i+nSize]
                i += nSize
        yield (opcode, vch, i)
def script_GetOpName(opcode):
    """Return the mnemonic for *opcode* without its "OP_" prefix."""
    try:
        name = opcodes.whatis(opcode)
    except KeyError:
        # Unknown opcode: synthesize a placeholder name.
        return "InvalidOp_" + str(opcode)
    return name.replace("OP_", "")
def decode_script(bytes):
    """Render a serialized script as space-separated opcodes and data.

    Push ops render as "<length>:<abbreviated hex>", everything else as the
    opcode mnemonic. Note: the parameter name shadows the builtin.
    """
    result = ''
    for (opcode, vch, i) in script_GetOp(bytes):
        if len(result) > 0: result += " "
        if opcode <= opcodes.OP_PUSHDATA4:
            result += "%d:"%(opcode,)
            result += short_hex(vch)
        else:
            result += script_GetOpName(opcode)
    return result
def match_decoded(decoded, to_match):
    """Return True when a decoded script matches an opcode template.

    A template entry equal to OP_PUSHDATA4 matches any push opcode, since
    every opcode <= OP_PUSHDATA4 just pushes data onto the stack.
    """
    if len(decoded) != len(to_match):
        return False
    for wanted, element in zip(to_match, decoded):
        opcode = element[0]
        if wanted == opcodes.OP_PUSHDATA4 and opcode <= opcodes.OP_PUSHDATA4:
            continue
        if wanted != opcode:
            return False
    return True
def extract_public_key(bytes, version='\x00'):
    """Best-effort extraction of a Bitcoin address from a script.

    Tries the standard scriptSig/scriptPubKey templates in turn and returns
    the corresponding address string, a bracketed address list for bare
    multisig, or "(None)" when no template matches. *version* is the
    address version byte passed through to the base58 helpers.
    """
    try:
        decoded = [ x for x in script_GetOp(bytes) ]
    except struct.error:
        return "(None)"
    # non-generated TxIn transactions push a signature
    # (seventy-something bytes) and then their public key
    # (33 or 65 bytes) onto the stack:
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        return public_key_to_bc_address(decoded[1][1], version=version)
    # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
    # 65 BYTES:... CHECKSIG
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return public_key_to_bc_address(decoded[0][1], version=version)
    # Pay-by-Bitcoin-address TxOuts look like:
    # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
    match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return hash_160_to_bc_address(decoded[2][1], version=version)
    # BIP11 TxOuts look like one of these:
    # Note that match_decoded is dumb, so OP_1 actually matches OP_1/2/3/etc:
    multisigs = [
        [ opcodes.OP_1, opcodes.OP_PUSHDATA4, opcodes.OP_1, opcodes.OP_CHECKMULTISIG ],
        [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ],
        [ opcodes.OP_3, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
    ]
    for match in multisigs:
        if match_decoded(decoded, match):
            return "["+','.join([public_key_to_bc_address(decoded[i][1]) for i in range(1,len(decoded)-1)])+"]"
    # BIP16 TxOuts look like:
    # HASH160 20 BYTES:... EQUAL
    # Note the fixed version byte "\x05" (P2SH address prefix).
    match = [ opcodes.OP_HASH160, 0x14, opcodes.OP_EQUAL ]
    if match_decoded(decoded, match):
        return hash_160_to_bc_address(decoded[1][1], version="\x05")
    return "(None)"
| agpl-3.0 |
kosgroup/odoo | addons/mail/models/mail_channel.py | 3 | 33607 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from email.utils import formataddr
import re
import uuid
from odoo import _, api, fields, models, modules, tools
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools import ormcache
from odoo.tools.safe_eval import safe_eval
class ChannelPartner(models.Model):
    """Per-partner channel state: last seen message, fold state, pinning."""
    _name = 'mail.channel.partner'
    _description = 'Last Seen Many2many'
    _table = 'mail_channel_partner'
    _rec_name = 'partner_id'
    # Channel member; cascade so the row dies with the partner/channel.
    partner_id = fields.Many2one('res.partner', string='Recipient', ondelete='cascade')
    channel_id = fields.Many2one('mail.channel', string='Channel', ondelete='cascade')
    # Last message this partner has seen in the channel.
    seen_message_id = fields.Many2one('mail.message', string='Last Seen')
    # UI state of this partner's chat window for the channel.
    fold_state = fields.Selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')], string='Conversation Fold State', default='open')
    is_minimized = fields.Boolean("Conversation is minimized")
    # Unpinning hides the channel from the partner's interface.
    is_pinned = fields.Boolean("Is pinned on the interface", default=True)
class Channel(models.Model):
""" A mail.channel is a discussion group that may behave like a listener
on documents. """
_description = 'Discussion channel'
_name = 'mail.channel'
_mail_flat_thread = False
_mail_post_access = 'read'
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
MAX_BOUNCE_LIMIT = 10
def _get_default_image(self):
image_path = modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
name = fields.Char('Name', required=True, translate=True)
channel_type = fields.Selection([
('chat', 'Chat Discussion'),
('channel', 'Channel')],
'Channel Type', default='channel')
description = fields.Text('Description')
uuid = fields.Char('UUID', size=50, index=True, default=lambda self: '%s' % uuid.uuid4())
email_send = fields.Boolean('Send messages by email', default=False)
# multi users channel
channel_last_seen_partner_ids = fields.One2many('mail.channel.partner', 'channel_id', string='Last Seen')
channel_partner_ids = fields.Many2many('res.partner', 'mail_channel_partner', 'channel_id', 'partner_id', string='Listeners')
channel_message_ids = fields.Many2many('mail.message', 'mail_message_mail_channel_rel')
is_member = fields.Boolean('Is a member', compute='_compute_is_member')
# access
public = fields.Selection([
('public', 'Everyone'),
('private', 'Invited people only'),
('groups', 'Selected group of users')],
'Privacy', required=True, default='groups',
help='This group is visible by non members. Invisible groups can add members through the invite button.')
group_public_id = fields.Many2one('res.groups', string='Authorized Group',
default=lambda self: self.env.ref('base.group_user'))
group_ids = fields.Many2many(
'res.groups', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "
"Note that they will be able to manage their subscription manually "
"if necessary.")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Photo", default=_get_default_image, attachment=True,
help="This field holds the image used as photo for the group, limited to 1024x1024px.")
image_medium = fields.Binary('Medium-sized photo', attachment=True,
help="Medium-sized photo of the group. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary('Small-sized photo', attachment=True,
help="Small-sized photo of the group. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
alias_id = fields.Many2one(
'mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically create new topics.")
is_subscribed = fields.Boolean(
'Is Subscribed', compute='_compute_is_subscribed')
@api.one
@api.depends('channel_partner_ids')
def _compute_is_subscribed(self):
self.is_subscribed = self.env.user.partner_id in self.channel_partner_ids
@api.multi
def _compute_is_member(self):
memberships = self.env['mail.channel.partner'].sudo().search([
('channel_id', 'in', self.ids),
('partner_id', '=', self.env.user.partner_id.id),
])
membership_ids = memberships.mapped('channel_id')
for record in self:
record.is_member = record in membership_ids
@api.model
    def create(self, vals):
        """ Create a channel together with its mail alias, and auto-subscribe
        users of the configured groups. The context flags disable logging and
        auto-subscription during creation to avoid spurious notifications.
        """
        tools.image_resize_images(vals)
        # Create channel and alias
        channel = super(Channel, self.with_context(
            alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True, mail_create_nosubscribe=True)
        ).create(vals)
        # link the alias back to the newly created channel (id only known after create)
        channel.alias_id.write({"alias_force_thread_id": channel.id, 'alias_parent_thread_id': channel.id})
        if vals.get('group_ids'):
            channel._subscribe_users()
        # make channel listen itself: posting on a channel notifies the channel
        if not self._context.get('mail_channel_noautofollow'):
            channel.message_subscribe(channel_ids=[channel.id])
        return channel
@api.multi
    def unlink(self):
        """ Delete the channels and their mail aliases.

        Raises UserError when attempting to delete the Whole Company channel,
        which other modules depend on.
        """
        # keep a reference to the aliases before the channels disappear
        aliases = self.mapped('alias_id')
        # Delete mail.channel
        try:
            all_emp_group = self.env.ref('mail.channel_all_employees')
        except ValueError:
            # reference data not installed; nothing to protect
            all_emp_group = None
        if all_emp_group and all_emp_group in self:
            raise UserError(_('You cannot delete those groups, as the Whole Company group is required by other modules.'))
        res = super(Channel, self).unlink()
        # Cascade-delete mail aliases as well, as they should not exist without the mail.channel.
        aliases.sudo().unlink()
        return res
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
result = super(Channel, self).write(vals)
if vals.get('group_ids'):
self._subscribe_users()
return result
def _subscribe_users(self):
for mail_channel in self:
mail_channel.write({'channel_partner_ids': [(4, pid) for pid in mail_channel.mapped('group_ids').mapped('users').mapped('partner_id').ids]})
@api.multi
def action_follow(self):
self.ensure_one()
channel_partner = self.mapped('channel_last_seen_partner_ids').filtered(lambda cp: cp.partner_id == self.env.user.partner_id)
if not channel_partner:
return self.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': self.env.user.partner_id.id})]})
@api.multi
def action_unfollow(self):
return self._action_unfollow(self.env.user.partner_id)
@api.multi
    def _action_unfollow(self, partner):
        """ Remove the given partner from the channel, notify it on the bus,
        and post a 'left channel' message unless the channel is a mailing list.
        """
        channel_info = self.channel_info('unsubscribe')[0]  # must be computed before leaving the channel (access rights)
        result = self.write({'channel_partner_ids': [(3, partner.id)]})
        self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner.id), channel_info)
        if not self.email_send:
            notification = _('<div class="o_mail_notification">left <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
            # post 'channel left' message as root since the partner just unsubscribed from the channel
            self.sudo().message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner.id)
        return result
@api.multi
    def _notification_recipients(self, message, groups):
        """ All recipients of a message on a channel are considered as partners.
        This means they will receive a minimal email, without a link to access
        in the backend. Mailing lists should indeed send minimal emails to avoid
        the noise. """
        groups = super(Channel, self)._notification_recipients(message, groups)
        # rewrite every non-customer group so its membership test always fails,
        # forcing recipients into the default (minimal email) handling
        for (index, (group_name, group_func, group_data)) in enumerate(groups):
            if group_name != 'customer':
                groups[index] = (group_name, lambda partner: False, group_data)
        return groups
@api.multi
def message_get_email_values(self, notif_mail=None):
self.ensure_one()
res = super(Channel, self).message_get_email_values(notif_mail=notif_mail)
headers = {}
if res.get('headers'):
try:
headers.update(safe_eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if self.alias_domain and self.alias_name:
headers['List-Id'] = '%s.%s' % (self.alias_name, self.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (self.alias_name, self.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (self.name, self.alias_name, self.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
@api.multi
    def message_receive_bounce(self, email, partner, mail_id=None):
        """ Override bounce management to unsubscribe bouncing addresses """
        # MAX_BOUNCE_LIMIT is defined on the model (outside this excerpt);
        # once a partner bounced too often, drop it from the channel
        if partner.message_bounce >= self.MAX_BOUNCE_LIMIT:
            self._action_unfollow(partner)
        return super(Channel, self).message_receive_bounce(email, partner, mail_id=mail_id)
@api.multi
def message_get_recipient_values(self, notif_message=None, recipient_ids=None):
# real mailing list: multiple recipients (hidden by X-Forge-To)
if self.alias_domain and self.alias_name:
return {
'email_to': ','.join(formataddr((partner.name, partner.email)) for partner in self.env['res.partner'].sudo().browse(recipient_ids)),
'recipient_ids': [],
}
return super(Channel, self).message_get_recipient_values(notif_message=notif_message, recipient_ids=recipient_ids)
@api.multi
    @api.returns('self', lambda value: value.id)
    def message_post(self, body='', subject=None, message_type='notification', subtype=None, parent_id=False, attachments=None, content_subtype='html', **kwargs):
        """ Post a message on the channels. Re-pins 'chat' channels so a new
        direct message makes the conversation reappear for its members, and
        disables auto-subscription of the author. """
        # auto pin 'direct_message' channel partner
        self.filtered(lambda channel: channel.channel_type == 'chat').mapped('channel_last_seen_partner_ids').write({'is_pinned': True})
        message = super(Channel, self.with_context(mail_create_nosubscribe=True)).message_post(body=body, subject=subject, message_type=message_type, subtype=subtype, parent_id=parent_id, attachments=attachments, content_subtype=content_subtype, **kwargs)
        return message
@api.model_cr
def init(self):
self._cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_channel_partner_seen_message_id_idx',))
if not self._cr.fetchone():
self._cr.execute('CREATE INDEX mail_channel_partner_seen_message_id_idx ON mail_channel_partner (channel_id,partner_id,seen_message_id)')
#------------------------------------------------------
# Instant Messaging API
#------------------------------------------------------
# A channel header should be broadcasted:
# - when adding user to channel (only to the new added partners)
# - when folding/minimizing a channel (only to the user making the action)
# A message should be broadcasted:
# - when a message is posted on a channel (to the channel, using _notify() method)
# Anonymous method
@api.multi
def _broadcast(self, partner_ids):
""" Broadcast the current channel header to the given partner ids
:param partner_ids : the partner to notify
"""
notifications = self._channel_channel_notifications(partner_ids)
self.env['bus.bus'].sendmany(notifications)
@api.multi
def _channel_channel_notifications(self, partner_ids):
""" Generate the bus notifications of current channel for the given partner ids
:param partner_ids : the partner to send the current channel header
:returns list of bus notifications (tuple (bus_channe, message_content))
"""
notifications = []
for partner in self.env['res.partner'].browse(partner_ids):
user_id = partner.user_ids and partner.user_ids[0] or False
if user_id:
for channel_info in self.sudo(user_id).channel_info():
notifications.append([(self._cr.dbname, 'res.partner', partner.id), channel_info])
return notifications
@api.multi
def _notify(self, message):
""" Broadcast the given message on the current channels.
Send the message on the Bus Channel (uuid for public mail.channel, and partner private bus channel (the tuple)).
A partner will receive only on message on its bus channel, even if this message belongs to multiple mail channel. Then 'channel_ids' field
of the received message indicates on wich mail channel the message should be displayed.
:param : mail.message to broadcast
"""
message.ensure_one()
notifications = self._channel_message_notifications(message)
self.env['bus.bus'].sendmany(notifications)
@api.multi
def _channel_message_notifications(self, message):
""" Generate the bus notifications for the given message
:param message : the mail.message to sent
:returns list of bus notifications (tuple (bus_channe, message_content))
"""
message_values = message.message_format()[0]
notifications = []
for channel in self:
notifications.append([(self._cr.dbname, 'mail.channel', channel.id), dict(message_values)])
# add uuid to allow anonymous to listen
if channel.public == 'public':
notifications.append([channel.uuid, dict(message_values)])
return notifications
@api.multi
    def channel_info(self, extra_info = False):
        """ Get the informations header for the current channels
            :param extra_info: optional marker copied into each header under 'info'
            :returns a list of channels values
            :rtype : list(dict)
        """
        channel_infos = []
        partner_channels = self.env['mail.channel.partner']
        # find the channel partner state, if logged user
        if self.env.user and self.env.user.partner_id:
            partner_channels = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', 'in', self.ids)])
        # for each channel, build the information header and include the logged partner information
        for channel in self:
            info = {
                'id': channel.id,
                'name': channel.name,
                'uuid': channel.uuid,
                'state': 'open',
                'is_minimized': False,
                'channel_type': channel.channel_type,
                'public': channel.public,
                'mass_mailing': channel.email_send,
            }
            if extra_info:
                info['info'] = extra_info
            # add the partner for 'direct mesage' channel
            if channel.channel_type == 'chat':
                # sudo + active_test=False: the other side may be archived or inaccessible
                info['direct_partner'] = (channel.sudo()
                                          .with_context(active_test=False)
                                          .channel_partner_ids
                                          .filtered(lambda p: p.id != self.env.user.partner_id.id)
                                          .read(['id', 'name', 'im_status']))
            # add user session state, if available and if user is logged
            if partner_channels.ids:
                partner_channel = partner_channels.filtered(lambda c: channel.id == c.channel_id.id)
                if len(partner_channel) >= 1:
                    partner_channel = partner_channel[0]
                    info['state'] = partner_channel.fold_state or 'open'
                    info['is_minimized'] = partner_channel.is_minimized
                    info['seen_message_id'] = partner_channel.seen_message_id.id
                # add needaction and unread counter, since the user is logged
                info['message_needaction_counter'] = channel.message_needaction_counter
                info['message_unread_counter'] = channel.message_unread_counter
            channel_infos.append(info)
        return channel_infos
@api.multi
def channel_fetch_message(self, last_id=False, limit=20):
""" Return message values of the current channel.
:param last_id : last message id to start the research
:param limit : maximum number of messages to fetch
:returns list of messages values
:rtype : list(dict)
"""
self.ensure_one()
domain = [("channel_ids", "in", self.ids)]
if last_id:
domain.append(("id", "<", last_id))
return self.env['mail.message'].message_fetch(domain=domain, limit=limit)
# User methods
@api.model
    def channel_get(self, partners_to, pin=True):
        """ Get the canonical private channel between some partners, create it if needed.
            To reuse an old channel (conversation), this one must be private, and contains
            only the given partners.
            :param partners_to : list of res.partner ids to add to the conversation
            :param pin : True if getting the channel should pin it for the current user
            :returns a channel header, or False if the users_to was False
            :rtype : dict
        """
        if partners_to:
            # the current user is always part of the conversation
            partners_to.append(self.env.user.partner_id.id)
            # determine type according to the number of partner in the channel
            # raw SQL: find the private chat channel containing exactly these partners
            self.env.cr.execute("""
                SELECT P.channel_id as channel_id
                FROM mail_channel C, mail_channel_partner P
                WHERE P.channel_id = C.id
                    AND C.public LIKE 'private'
                    AND P.partner_id IN %s
                    AND channel_type LIKE 'chat'
                GROUP BY P.channel_id
                HAVING COUNT(P.partner_id) = %s
            """, (tuple(partners_to), len(partners_to),))
            result = self.env.cr.dictfetchall()
            if result:
                # get the existing channel between the given partners
                channel = self.browse(result[0].get('channel_id'))
                # pin up the channel for the current partner
                if pin:
                    self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)]).write({'is_pinned': True})
            else:
                # create a new one
                channel = self.create({
                    'channel_partner_ids': [(4, partner_id) for partner_id in partners_to],
                    'public': 'private',
                    'channel_type': 'chat',
                    'email_send': False,
                    'name': ', '.join(self.env['res.partner'].sudo().browse(partners_to).mapped('name')),
                })
                # broadcast the channel header to the other partner (not me)
                channel._broadcast(partners_to)
            return channel.channel_info()[0]
        return False
@api.model
def channel_get_and_minimize(self, partners_to):
channel = self.channel_get(partners_to)
if channel:
self.channel_minimize(channel['uuid'])
return channel
@api.model
    def channel_fold(self, uuid, state=None):
        """ Update the fold_state of the given session. In order to syncronize web browser
            tabs, the change will be broadcast to himself (the current user channel).
            Note: the user need to be logged
            :param state : the new status of the session for the current user.
        """
        domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
        for session_state in self.env['mail.channel.partner'].search(domain):
            # no explicit state requested: toggle open <-> folded
            if not state:
                state = session_state.fold_state
                if session_state.fold_state == 'open':
                    state = 'folded'
                else:
                    state = 'open'
            session_state.write({
                'fold_state': state,
                # 'closed' means the window disappears entirely
                'is_minimized': bool(state != 'closed'),
            })
            # broadcast to the user's own bus channel so other browser tabs sync
            self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), session_state.channel_id.channel_info()[0])
@api.model
def channel_minimize(self, uuid, minimized=True):
values = {
'fold_state': minimized and 'open' or 'closed',
'is_minimized': minimized
}
domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
channel_partners = self.env['mail.channel.partner'].search(domain)
channel_partners.write(values)
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_partners.channel_id.channel_info()[0])
@api.model
def channel_pin(self, uuid, pinned=False):
# add the person in the channel, and pin it (or unpin it)
channel = self.search([('uuid', '=', uuid)])
channel_partners = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)])
if not pinned:
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel.channel_info('unsubscribe')[0])
if channel_partners:
channel_partners.write({'is_pinned': pinned})
@api.multi
def channel_seen(self):
self.ensure_one()
if self.channel_message_ids.ids:
last_message_id = self.channel_message_ids.ids[0] # zero is the index of the last message
self.env['mail.channel.partner'].search([('channel_id', 'in', self.ids), ('partner_id', '=', self.env.user.partner_id.id)]).write({'seen_message_id': last_message_id})
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), {'info': 'channel_seen', 'id': self.id, 'last_message_id': last_message_id})
return last_message_id
@api.multi
def channel_invite(self, partner_ids):
""" Add the given partner_ids to the current channels and broadcast the channel header to them.
:param partner_ids : list of partner id to add
"""
partners = self.env['res.partner'].browse(partner_ids)
# add the partner
for channel in self:
partners_to_add = partners - channel.channel_partner_ids
channel.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': partner_id}) for partner_id in partners_to_add.ids]})
for partner in partners_to_add:
notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
self.message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner.id)
# broadcast the channel header to the added partner
self._broadcast(partner_ids)
#------------------------------------------------------
# Instant Messaging View Specific (Slack Client Action)
#------------------------------------------------------
@api.model
def channel_fetch_slot(self):
""" Return the channels of the user grouped by 'slot' (channel, direct_message or private_group), and
the mapping between partner_id/channel_id for direct_message channels.
:returns dict : the grouped channels and the mapping
"""
values = {}
my_partner_id = self.env.user.partner_id.id
pinned_channels = self.env['mail.channel.partner'].search([('partner_id', '=', my_partner_id), ('is_pinned', '=', True)]).mapped('channel_id')
# get the group/public channels
values['channel_channel'] = self.search([('channel_type', '=', 'channel'), ('public', 'in', ['public', 'groups']), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
# get the pinned 'direct message' channel
direct_message_channels = self.search([('channel_type', '=', 'chat'), ('id', 'in', pinned_channels.ids)])
values['channel_direct_message'] = direct_message_channels.channel_info()
# get the private group
values['channel_private_group'] = self.search([('channel_type', '=', 'channel'), ('public', '=', 'private'), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
return values
@api.model
def channel_search_to_join(self, name=None, domain=None):
""" Return the channel info of the channel the current partner can join
:param name : the name of the researched channels
:param domain : the base domain of the research
:returns dict : channel dict
"""
if not domain:
domain = []
domain = expression.AND([
[('channel_type', '=', 'channel')],
[('channel_partner_ids', 'not in', [self.env.user.partner_id.id])],
[('public', '!=', 'private')],
domain
])
if name:
domain = expression.AND([domain, [('name', 'ilike', '%'+name+'%')]])
return self.search(domain).read(['name', 'public', 'uuid', 'channel_type'])
@api.multi
def channel_join_and_get_info(self):
self.ensure_one()
if self.channel_type == 'channel' and not self.email_send:
notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
self.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
self.action_follow()
channel_info = self.channel_info()[0]
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
return channel_info
@api.model
def channel_create(self, name, privacy='public'):
""" Create a channel and add the current partner, broadcast it (to make the user directly
listen to it when polling)
:param name : the name of the channel to create
:param privacy : privacy of the channel. Should be 'public' or 'private'.
:return dict : channel header
"""
# create the channel
new_channel = self.create({
'name': name,
'public': privacy,
'email_send': False,
'channel_partner_ids': [(4, self.env.user.partner_id.id)]
})
channel_info = new_channel.channel_info('creation')[0]
notification = _('<div class="o_mail_notification">created <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (new_channel.id, new_channel.name,)
new_channel.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
return channel_info
@api.model
def get_mention_suggestions(self, search, limit=8):
""" Return 'limit'-first channels' id, name and public fields such that the name matches a
'search' string. Exclude channels of type chat (DM), and private channels the current
user isn't registered to. """
domain = expression.AND([
[('name', 'ilike', search)],
[('channel_type', '=', 'channel')],
expression.OR([
[('public', '!=', 'private')],
[('channel_partner_ids', 'in', [self.env.user.partner_id.id])]
])
])
return self.search_read(domain, ['id', 'name', 'public'], limit=limit)
@api.model
    def channel_fetch_listeners(self, uuid):
        """ Return the id, name and email of partners listening to the given channel """
        # raw SQL (no ORM): also usable from anonymous/public sessions where
        # record rules would hide the partners
        self._cr.execute("""
            SELECT P.id, P.name, P.email
            FROM mail_channel_partner CP
            INNER JOIN res_partner P ON CP.partner_id = P.id
            INNER JOIN mail_channel C ON CP.channel_id = C.id
            WHERE C.uuid = %s""", (uuid,))
        return self._cr.dictfetchall()
@api.multi
    def channel_fetch_preview(self):
        """ Return the last message of the given channels """
        # one SQL pass: id of the most recent message per channel
        self._cr.execute("""
            SELECT mail_channel_id AS id, MAX(mail_message_id) AS message_id
            FROM mail_message_mail_channel_rel
            WHERE mail_channel_id IN %s
            GROUP BY mail_channel_id
            """, (tuple(self.ids),))
        # keyed by message_id so formatted messages can be matched back below
        channels_preview = dict((r['message_id'], r) for r in self._cr.dictfetchall())
        # NOTE(review): relies on Python 2 semantics — .keys() is a plain list
        # passed to browse(), and .values() returns a list
        last_messages = self.env['mail.message'].browse(channels_preview.keys()).message_format()
        for message in last_messages:
            channel = channels_preview[message['id']]
            del(channel['message_id'])
            channel['last_message'] = message
        return channels_preview.values()
#------------------------------------------------------
# Commands
#------------------------------------------------------
@api.model
@ormcache()
def get_mention_commands(self):
""" Returns the allowed commands in channels """
commands = []
for n in dir(self):
match = re.search('^_define_command_(.+?)$', n)
if match:
command = getattr(self, n)()
command['name'] = match.group(1)
commands.append(command)
return commands
@api.multi
def execute_command(self, command='', **kwargs):
""" Executes a given command """
self.ensure_one()
command_callback = getattr(self, '_execute_command_' + command, False)
if command_callback:
command_callback(**kwargs)
def _send_transient_message(self, partner_to, content):
""" Notifies partner_to that a message (not stored in DB) has been
written in this channel """
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner_to.id), {
'body': "<span class='o_mail_notification'>" + content + "</span>",
'channel_ids': [self.id],
'info': 'transient_message',
})
def _define_command_help(self):
        # metadata for the /help command (collected by get_mention_commands)
        return {'help': _("Show an helper message")}
def _execute_command_help(self, **kwargs):
        """ /help command: send a transient help message describing the
        channel and the available chat shortcuts to the current user. """
        partner = self.env.user.partner_id
        if self.channel_type == 'channel':
            msg = _("You are in channel <b>#%s</b>.") % self.name
            if self.public == 'private':
                msg += _(" This channel is private. People must be invited to join it.")
        else:
            # direct conversation: name the other participant
            channel_partners = self.env['mail.channel.partner'].search([('partner_id', '!=', partner.id), ('channel_id', '=', self.id)])
            msg = _("You are in a private conversation with <b>@%s</b>.") % channel_partners[0].partner_id.name
        msg += _("""<br><br>
            You can mention someone by typing <b>@username</b>, this will grab its attention.<br>
            You can mention a channel by typing <b>#channel</b>.<br>
            You can execute a command by typing <b>/command</b>.<br>
            You can insert canned responses in your message by typing <b>:shortcut</b>.<br>""")
        self._send_transient_message(partner, msg)
def _define_command_leave(self):
        # metadata for the /leave command (collected by get_mention_commands)
        return {'help': _("Leave this channel")}
def _execute_command_leave(self, **kwargs):
if self.channel_type == 'channel':
self.action_unfollow()
else:
self.channel_pin(self.uuid, False)
def _define_command_who(self):
        # metadata for the /who command (collected by get_mention_commands);
        # available on both regular channels and direct conversations
        return {
            'channel_types': ['channel', 'chat'],
            'help': _("List users in the current channel")
        }
def _execute_command_who(self, **kwargs):
partner = self.env.user.partner_id
members = [
'<a href="#" data-oe-id='+str(p.id)+' data-oe-model="res.partner">@'+p.name+'</a>'
for p in self.channel_partner_ids[:30] if p != partner
]
if len(members) == 0:
msg = _("You are alone in this channel.")
else:
dots = "..." if len(members) != len(self.channel_partner_ids) - 1 else ""
msg = _("Users in this channel: %s %s and you.") % (", ".join(members), dots)
self._send_transient_message(partner, msg)
| gpl-3.0 |
andyliuliming/azure-linux-extensions | DSC/azure/storage/blobservice.py | 46 | 105417 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
WindowsAzureError,
BLOB_SERVICE_HOST_BASE,
DEV_BLOB_HOST,
_ERROR_VALUE_NEGATIVE,
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
_convert_class_to_xml,
_dont_fail_not_exist,
_dont_fail_on_exist,
_encode_base64,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_parse_enum_results_list,
_parse_response,
_parse_response_for_dict,
_parse_response_for_dict_filter,
_parse_response_for_dict_prefix,
_parse_simple_list,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_type_bytes,
_validate_not_none,
)
from azure.http import HTTPRequest
from azure.storage import (
Container,
ContainerEnumResults,
PageList,
PageRange,
SignedIdentifiers,
StorageServiceProperties,
_convert_block_list_to_xml,
_convert_response_to_block_list,
_create_blob_result,
_parse_blob_enum_results_list,
_update_storage_blob_header,
)
from azure.storage.storageclient import _StorageClient
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Page blobs must be aligned on 512-byte boundaries.
# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
_PAGE_SIZE = 512
class BlobService(_StorageClient):
'''
This is the main class managing Blob resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
                 host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST):
        '''
        account_name: your storage account name, required for all operations.
        account_key: your storage account key, required for all operations.
        protocol: Optional. Protocol. Defaults to https.
        host_base:
            Optional. Live host base url. Defaults to Azure url. Override this
            for on-premise.
        dev_host: Optional. Dev host url. Defaults to localhost.
        '''
        # upload size limits: blobs above _BLOB_MAX_DATA_SIZE are sent in
        # blocks of at most _BLOB_MAX_CHUNK_DATA_SIZE
        self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024
        self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024
        super(BlobService, self).__init__(
            account_name, account_key, protocol, host_base, dev_host)
def make_blob_url(self, container_name, blob_name, account_name=None,
protocol=None, host_base=None):
'''
Creates the url to access a blob.
container_name: Name of container.
blob_name: Name of blob.
account_name:
Name of the storage account. If not specified, uses the account
specified when BlobService was initialized.
protocol:
Protocol to use: 'http' or 'https'. If not specified, uses the
protocol specified when BlobService was initialized.
host_base:
Live host base url. If not specified, uses the host base specified
when BlobService was initialized.
'''
if not account_name:
account_name = self.account_name
if not protocol:
protocol = self.protocol
if not host_base:
host_base = self.host_base
return '{0}://{1}{2}/{3}/{4}'.format(protocol,
account_name,
host_base,
container_name,
blob_name)
def list_containers(self, prefix=None, marker=None, maxresults=None,
                        include=None):
        '''
        The List Containers operation returns a list of the containers under
        the specified account.

        prefix:
            Optional. Filters the results to return only containers whose names
            begin with the specified prefix.
        marker:
            Optional. A string value that identifies the portion of the list to
            be returned with the next list operation.
        maxresults:
            Optional. Specifies the maximum number of containers to return.
        include:
            Optional. Include this parameter to specify that the container's
            metadata be returned as part of the response body. set this
            parameter to string 'metadata' to get container's metadata.
        '''
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/?comp=list'
        # None-valued query parameters are dropped when the URI is built
        request.query = [
            ('prefix', _str_or_none(prefix)),
            ('marker', _str_or_none(marker)),
            ('maxresults', _int_or_none(maxresults)),
            ('include', _str_or_none(include))
        ]
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # signs the request with the shared key
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_enum_results_list(response,
                                        ContainerEnumResults,
                                        "Containers",
                                        Container)
def create_container(self, container_name, x_ms_meta_name_values=None,
x_ms_blob_public_access=None, fail_on_exist=False):
'''
Creates a new container under the specified account. If the container
with the same name already exists, the operation fails.
container_name: Name of container to create.
x_ms_meta_name_values:
Optional. A dict with name_value pairs to associate with the
container as metadata. Example:{'Category':'test'}
x_ms_blob_public_access:
Optional. Possible values include: container, blob
fail_on_exist:
specify whether to throw an exception when the container exists.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_container_properties(self, container_name, x_ms_lease_id=None):
'''
Returns all user-defined metadata and system properties for the
specified container.
container_name: Name of existing container.
x_ms_lease_id:
If specified, get_container_properties only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def get_container_metadata(self, container_name, x_ms_lease_id=None):
        '''
        Returns all user-defined metadata for the specified container. The
        metadata will be in returned dictionary['x-ms-meta-(name)'].

        container_name: Name of existing container.
        x_ms_lease_id:
            If specified, get_container_metadata only succeeds if the
            container's lease is active and matches this ID.
        '''
        _validate_not_none('container_name', container_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + \
            _str(container_name) + '?restype=container&comp=metadata'
        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # signs the request with the shared key
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)

        # keep only the x-ms-meta-* response headers
        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
def set_container_metadata(self, container_name,
x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Sets one or more user-defined name-value pairs for the specified
container.
container_name: Name of existing container.
x_ms_meta_name_values:
A dict containing name, value for metadata.
Example: {'category':'test'}
x_ms_lease_id:
If specified, set_container_metadata only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_container_acl(self, container_name, x_ms_lease_id=None):
'''
Gets the permissions for the specified container.
container_name: Name of existing container.
x_ms_lease_id:
If specified, get_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=acl'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, SignedIdentifiers)
def set_container_acl(self, container_name, signed_identifiers=None,
x_ms_blob_public_access=None, x_ms_lease_id=None):
'''
Sets the permissions for the specified container.
container_name: Name of existing container.
signed_identifiers: SignedIdentifers instance
x_ms_blob_public_access:
Optional. Possible values include: container, blob
x_ms_lease_id:
If specified, set_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=acl'
request.headers = [
('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
]
request.body = _get_request_body(
_convert_class_to_xml(signed_identifiers))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def delete_container(self, container_name, fail_not_exist=False,
x_ms_lease_id=None):
'''
Marks the specified container for deletion.
container_name: Name of container to delete.
fail_not_exist:
Specify whether to throw an exception when the container doesn't
exist.
x_ms_lease_id: Required if the container has an active lease.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '?restype=container'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def lease_container(self, container_name, x_ms_lease_action,
x_ms_lease_id=None, x_ms_lease_duration=60,
x_ms_lease_break_period=None,
x_ms_proposed_lease_id=None):
'''
Establishes and manages a lock on a container for delete operations.
The lock duration can be 15 to 60 seconds, or can be infinite.
container_name: Name of existing container.
x_ms_lease_action:
Required. Possible values: acquire|renew|release|break|change
x_ms_lease_id: Required if the container has an active lease.
x_ms_lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. For backwards compatibility, the default is
60, and the value is only used on an acquire operation.
x_ms_lease_break_period:
Optional. For a break operation, this is the proposed duration of
seconds that the lease should continue before it is broken, between
0 and 60 seconds. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining
on the lease is used. A new lease will not be available before the
break period has expired, but the lease may be held for longer than
the break period. If this header does not appear with a break
operation, a fixed-duration lease breaks after the remaining lease
period elapses, and an infinite lease breaks immediately.
x_ms_proposed_lease_id:
Optional for acquire, required for change. Proposed lease ID, in a
GUID string format.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-lease-action', _str_or_none(x_ms_lease_action)),
('x-ms-lease-duration',
_str_or_none(
x_ms_lease_duration if x_ms_lease_action == 'acquire'\
else None)),
('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),
('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-lease-id', 'x-ms-lease-time'])
def list_blobs(self, container_name, prefix=None, marker=None,
maxresults=None, include=None, delimiter=None):
'''
Returns the list of blobs under the specified container.
container_name: Name of existing container.
prefix:
Optional. Filters the results to return only blobs whose names
begin with the specified prefix.
marker:
Optional. A string value that identifies the portion of the list
to be returned with the next list operation. The operation returns
a marker value within the response body if the list returned was
not complete. The marker value may then be used in a subsequent
call to request the next set of list items. The marker value is
opaque to the client.
maxresults:
Optional. Specifies the maximum number of blobs to return,
including all BlobPrefix elements. If the request does not specify
maxresults or specifies a value greater than 5,000, the server will
return up to 5,000 items. Setting maxresults to a value less than
or equal to zero results in error response code 400 (Bad Request).
include:
Optional. Specifies one or more datasets to include in the
response. To specify more than one of these options on the URI,
you must separate each option with a comma. Valid values are:
snapshots:
Specifies that snapshots should be included in the
enumeration. Snapshots are listed from oldest to newest in
the response.
metadata:
Specifies that blob metadata be returned in the response.
uncommittedblobs:
Specifies that blobs for which blocks have been uploaded,
but which have not been committed using Put Block List
(REST API), be included in the response.
copy:
Version 2012-02-12 and newer. Specifies that metadata
related to any current or previous Copy Blob operation
should be included in the response.
delimiter:
Optional. When the request includes this parameter, the operation
returns a BlobPrefix element in the response body that acts as a
placeholder for all blobs whose names begin with the same
substring up to the appearance of the delimiter character. The
delimiter may be a single character or a string.
'''
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('delimiter', _str_or_none(delimiter)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_blob_enum_results_list(response)
def set_blob_service_properties(self, storage_service_properties,
timeout=None):
'''
Sets the properties of a storage account's Blob service, including
Windows Azure Storage Analytics. You can also use this operation to
set the default request version for all incoming requests that do not
have a version specified.
storage_service_properties: a StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties',
storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(
_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_blob_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Blob service, including
Windows Azure Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def get_blob_properties(self, container_name, blob_name,
x_ms_lease_id=None):
'''
Returns all user-defined metadata, standard HTTP properties, and
system properties for the blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'HEAD'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def set_blob_properties(self, container_name, blob_name,
x_ms_blob_cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_md5=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_lease_id=None):
'''
Sets system properties on the blob.
container_name: Name of existing container.
blob_name: Name of existing blob.
x_ms_blob_cache_control:
Optional. Modifies the cache control string for the blob.
x_ms_blob_content_type: Optional. Sets the blob's content type.
x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash.
x_ms_blob_content_encoding: Optional. Sets the blob's content encoding.
x_ms_blob_content_language: Optional. Sets the blob's content language.
x_ms_lease_id: Required if the blob has an active lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(container_name) + '/' + _str(blob_name) + '?comp=properties'
request.headers = [
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
('x-ms-blob-content-encoding',
_str_or_none(x_ms_blob_content_encoding)),
('x-ms-blob-content-language',
_str_or_none(x_ms_blob_content_language)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
    def put_blob(self, container_name, blob_name, blob, x_ms_blob_type,
                 content_encoding=None, content_language=None,
                 content_md5=None, cache_control=None,
                 x_ms_blob_content_type=None, x_ms_blob_content_encoding=None,
                 x_ms_blob_content_language=None, x_ms_blob_content_md5=None,
                 x_ms_blob_cache_control=None, x_ms_meta_name_values=None,
                 x_ms_lease_id=None, x_ms_blob_content_length=None,
                 x_ms_blob_sequence_number=None):
        '''
        Creates a new block blob or page blob, or updates the content of an
        existing block blob.

        See put_block_blob_from_* and put_page_blob_from_* for high level
        functions that handle the creation and upload of large blobs with
        automatic chunking and progress notifications.

        container_name: Name of existing container.
        blob_name: Name of blob to create or update.
        blob:
            For BlockBlob:
                Content of blob as bytes (size < 64MB). For larger size, you
                must call put_block and put_block_list to set content of blob.
            For PageBlob:
                Use None and call put_page to set content of blob.
        x_ms_blob_type: Required. Could be BlockBlob or PageBlob.
        content_encoding:
            Optional. Specifies which content encodings have been applied to
            the blob. This value is returned to the client when the Get Blob
            (REST API) operation is performed on the blob resource. The client
            can use this value when returned to decode the blob content.
        content_language:
            Optional. Specifies the natural languages used by this resource.
        content_md5:
            Optional. An MD5 hash of the blob content. This hash is used to
            verify the integrity of the blob during transport. When this header
            is specified, the storage service checks the hash that has arrived
            with the one that was sent. If the two hashes do not match, the
            operation will fail with error code 400 (Bad Request).
        cache_control:
            Optional. The Blob service stores this value but does not use or
            modify it.
        x_ms_blob_content_type: Optional. Set the blob's content type.
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
        x_ms_blob_content_language: Optional. Set the blob's content language.
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
        x_ms_blob_cache_control: Optional. Sets the blob's cache control.
        x_ms_meta_name_values: A dict containing name, value for metadata.
        x_ms_lease_id: Required if the blob has an active lease.
        x_ms_blob_content_length:
            Required for page blobs. This header specifies the maximum size
            for the page blob, up to 1 TB. The page blob size must be aligned
            to a 512-byte boundary.
        x_ms_blob_sequence_number:
            Optional. Set for page blobs only. The sequence number is a
            user-controlled value that you can use to track requests. The
            value of the sequence number must be between 0 and 2^63 - 1. The
            default value is 0.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('x_ms_blob_type', x_ms_blob_type)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
        # Every possible header is listed; unused ones carry a None value.
        # NOTE(review): None-valued headers appear to be filtered out when the
        # request is finalized by _update_storage_blob_header -- confirm there.
        request.headers = [
            ('x-ms-blob-type', _str_or_none(x_ms_blob_type)),
            ('Content-Encoding', _str_or_none(content_encoding)),
            ('Content-Language', _str_or_none(content_language)),
            ('Content-MD5', _str_or_none(content_md5)),
            ('Cache-Control', _str_or_none(cache_control)),
            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
            ('x-ms-blob-content-encoding',
             _str_or_none(x_ms_blob_content_encoding)),
            ('x-ms-blob-content-language',
             _str_or_none(x_ms_blob_content_language)),
            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
            ('x-ms-meta-name-values', x_ms_meta_name_values),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
            ('x-ms-blob-content-length',
             _str_or_none(x_ms_blob_content_length)),
            ('x-ms-blob-sequence-number',
             _str_or_none(x_ms_blob_sequence_number))
        ]
        # Rejects non-bytes content (page blobs pass None for an empty body).
        request.body = _get_request_body_bytes_only('blob', blob)
        # Body and headers must both be in place before the final header pass
        # below, which authenticates the request.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        self._perform_request(request)
def put_block_blob_from_path(self, container_name, blob_name, file_path,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from a file path, or updates the content of an
existing block blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
file_path: Path of the file to upload as the blob content.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
self.put_block_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
    def put_block_blob_from_file(self, container_name, blob_name, stream,
                                 count=None, content_encoding=None,
                                 content_language=None, content_md5=None,
                                 cache_control=None,
                                 x_ms_blob_content_type=None,
                                 x_ms_blob_content_encoding=None,
                                 x_ms_blob_content_language=None,
                                 x_ms_blob_content_md5=None,
                                 x_ms_blob_cache_control=None,
                                 x_ms_meta_name_values=None,
                                 x_ms_lease_id=None, progress_callback=None):
        '''
        Creates a new block blob from a file/stream, or updates the content of
        an existing block blob, with automatic chunking and progress
        notifications.

        container_name: Name of existing container.
        blob_name: Name of blob to create or update.
        stream: Opened file/stream to upload as the blob content.
        count:
            Number of bytes to read from the stream. This is optional, but
            should be supplied for optimal performance.
        content_encoding:
            Optional. Specifies which content encodings have been applied to
            the blob. This value is returned to the client when the Get Blob
            (REST API) operation is performed on the blob resource. The client
            can use this value when returned to decode the blob content.
        content_language:
            Optional. Specifies the natural languages used by this resource.
        content_md5:
            Optional. An MD5 hash of the blob content. This hash is used to
            verify the integrity of the blob during transport. When this header
            is specified, the storage service checks the hash that has arrived
            with the one that was sent. If the two hashes do not match, the
            operation will fail with error code 400 (Bad Request).
        cache_control:
            Optional. The Blob service stores this value but does not use or
            modify it.
        x_ms_blob_content_type: Optional. Set the blob's content type.
        x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
        x_ms_blob_content_language: Optional. Set the blob's content language.
        x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
        x_ms_blob_cache_control: Optional. Sets the blob's cache control.
        x_ms_meta_name_values: A dict containing name, value for metadata.
        x_ms_lease_id: Required if the blob has an active lease.
        progress_callback:
            Callback for progress with signature function(current, total) where
            current is the number of bytes transfered so far, and total is the
            size of the blob, or None if the total size is unknown.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('stream', stream)
        # Small known size: upload in a single Put Blob request. A count of
        # None (unknown) or 0 falls through to the chunked path below.
        if count and count < self._BLOB_MAX_DATA_SIZE:
            if progress_callback:
                progress_callback(0, count)
            data = stream.read(count)
            self.put_blob(container_name,
                          blob_name,
                          data,
                          'BlockBlob',
                          content_encoding,
                          content_language,
                          content_md5,
                          cache_control,
                          x_ms_blob_content_type,
                          x_ms_blob_content_encoding,
                          x_ms_blob_content_language,
                          x_ms_blob_content_md5,
                          x_ms_blob_cache_control,
                          x_ms_meta_name_values,
                          x_ms_lease_id)
            if progress_callback:
                progress_callback(count, count)
        else:
            if progress_callback:
                progress_callback(0, count)
            # Create the (empty) blob first, then upload blocks and commit
            # them with put_block_list at the end.
            self.put_blob(container_name,
                          blob_name,
                          None,
                          'BlockBlob',
                          content_encoding,
                          content_language,
                          content_md5,
                          cache_control,
                          x_ms_blob_content_type,
                          x_ms_blob_content_encoding,
                          x_ms_blob_content_language,
                          x_ms_blob_content_md5,
                          x_ms_blob_cache_control,
                          x_ms_meta_name_values,
                          x_ms_lease_id)
            remain_bytes = count          # None means "read until EOF"
            block_ids = []                # IDs to commit, in upload order
            block_index = 0               # counter used to derive block IDs
            index = 0                     # bytes uploaded so far (progress)
            while True:
                # With an unknown total, read a full chunk each time;
                # otherwise never read past the requested count.
                request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\
                    if remain_bytes is None else min(
                        remain_bytes,
                        self._BLOB_MAX_CHUNK_DATA_SIZE)
                data = stream.read(request_count)
                if data:
                    length = len(data)
                    index += length
                    # NOTE: the truthiness test is safe here; remain_bytes
                    # can only be 0 after this line, and then the next
                    # iteration reads 0 bytes and breaks.
                    remain_bytes = remain_bytes - \
                        length if remain_bytes else None
                    # Block IDs are zero-padded sequence numbers; all IDs in
                    # one blob must have equal length.
                    block_id = '{0:08d}'.format(block_index)
                    self.put_block(container_name, blob_name,
                                   data, block_id, x_ms_lease_id=x_ms_lease_id)
                    block_ids.append(block_id)
                    block_index += 1
                    if progress_callback:
                        progress_callback(index, count)
                else:
                    break
            # Commit the uploaded blocks so they become the blob content.
            self.put_block_list(container_name, blob_name, block_ids,
                                content_md5, x_ms_blob_cache_control,
                                x_ms_blob_content_type,
                                x_ms_blob_content_encoding,
                                x_ms_blob_content_language,
                                x_ms_blob_content_md5,
                                x_ms_meta_name_values,
                                x_ms_lease_id)
def put_block_blob_from_bytes(self, container_name, blob_name, blob,
index=0, count=None, content_encoding=None,
content_language=None, content_md5=None,
cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from an array of bytes, or updates the content
of an existing block blob, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
blob: Content of blob as an array of bytes.
index: Start index in the array of bytes.
count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
if index < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
if count < self._BLOB_MAX_DATA_SIZE:
if progress_callback:
progress_callback(0, count)
data = blob[index: index + count]
self.put_blob(container_name,
blob_name,
data,
'BlockBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id)
if progress_callback:
progress_callback(count, count)
else:
stream = BytesIO(blob)
stream.seek(index)
self.put_block_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
def put_block_blob_from_text(self, container_name, blob_name, text,
text_encoding='utf-8',
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None, progress_callback=None):
'''
Creates a new block blob from str/unicode, or updates the content of an
existing block blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
text: Text to upload to the blob.
text_encoding: Encoding to use to convert the text to bytes.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
if not isinstance(text, bytes):
_validate_not_none('text_encoding', text_encoding)
text = text.encode(text_encoding)
self.put_block_blob_from_bytes(container_name,
blob_name,
text,
0,
len(text),
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
progress_callback)
def put_page_blob_from_path(self, container_name, blob_name, file_path,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from a file path, or updates the content of an
existing page blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
file_path: Path of the file to upload as the blob content.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
self.put_page_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
x_ms_blob_sequence_number,
progress_callback)
def put_page_blob_from_file(self, container_name, blob_name, stream, count,
content_encoding=None, content_language=None,
content_md5=None, cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from a file/stream, or updates the content of an
existing page blob, with automatic chunking and progress notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
stream: Opened file/stream to upload as the blob content.
count:
Number of bytes to read from the stream. This is required, a page
blob cannot be created if the count is unknown.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_not_none('count', count)
if count < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
if count % _PAGE_SIZE != 0:
raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
if progress_callback:
progress_callback(0, count)
self.put_blob(container_name,
blob_name,
b'',
'PageBlob',
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
count,
x_ms_blob_sequence_number)
remain_bytes = count
page_start = 0
while True:
request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE)
data = stream.read(request_count)
if data:
length = len(data)
remain_bytes = remain_bytes - length
page_end = page_start + length - 1
self.put_page(container_name,
blob_name,
data,
'bytes={0}-{1}'.format(page_start, page_end),
'update',
x_ms_lease_id=x_ms_lease_id)
page_start = page_start + length
if progress_callback:
progress_callback(page_start, count)
else:
break
def put_page_blob_from_bytes(self, container_name, blob_name, blob,
index=0, count=None, content_encoding=None,
content_language=None, content_md5=None,
cache_control=None,
x_ms_blob_content_type=None,
x_ms_blob_content_encoding=None,
x_ms_blob_content_language=None,
x_ms_blob_content_md5=None,
x_ms_blob_cache_control=None,
x_ms_meta_name_values=None,
x_ms_lease_id=None,
x_ms_blob_sequence_number=None,
progress_callback=None):
'''
Creates a new page blob from an array of bytes, or updates the content
of an existing page blob, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of blob to create or update.
blob: Content of blob as an array of bytes.
index: Start index in the array of bytes.
count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
content_encoding:
Optional. Specifies which content encodings have been applied to
the blob. This value is returned to the client when the Get Blob
(REST API) operation is performed on the blob resource. The client
can use this value when returned to decode the blob content.
content_language:
Optional. Specifies the natural languages used by this resource.
content_md5:
Optional. An MD5 hash of the blob content. This hash is used to
verify the integrity of the blob during transport. When this header
is specified, the storage service checks the hash that has arrived
with the one that was sent. If the two hashes do not match, the
operation will fail with error code 400 (Bad Request).
cache_control:
Optional. The Blob service stores this value but does not use or
modify it.
x_ms_blob_content_type: Optional. Set the blob's content type.
x_ms_blob_content_encoding: Optional. Set the blob's content encoding.
x_ms_blob_content_language: Optional. Set the blob's content language.
x_ms_blob_content_md5: Optional. Set the blob's MD5 hash.
x_ms_blob_cache_control: Optional. Sets the blob's cache control.
x_ms_meta_name_values: A dict containing name, value for metadata.
x_ms_lease_id: Required if the blob has an active lease.
x_ms_blob_sequence_number:
Optional. Set for page blobs only. The sequence number is a
user-controlled value that you can use to track requests. The
value of the sequence number must be between 0 and 2^63 - 1. The
default value is 0.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_type_bytes('blob', blob)
if index < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
self.put_page_blob_from_file(container_name,
blob_name,
stream,
count,
content_encoding,
content_language,
content_md5,
cache_control,
x_ms_blob_content_type,
x_ms_blob_content_encoding,
x_ms_blob_content_language,
x_ms_blob_content_md5,
x_ms_blob_cache_control,
x_ms_meta_name_values,
x_ms_lease_id,
x_ms_blob_sequence_number,
progress_callback)
    def get_blob(self, container_name, blob_name, snapshot=None,
                 x_ms_range=None, x_ms_lease_id=None,
                 x_ms_range_get_content_md5=None):
        '''
        Reads or downloads a blob from the system, including its metadata and
        properties.
        See get_blob_to_* for high level functions that handle the download
        of large blobs with automatic chunking and progress notifications.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        snapshot:
            Optional. The snapshot parameter is an opaque DateTime value that,
            when present, specifies the blob snapshot to retrieve.
        x_ms_range:
            Optional. Return only the bytes of the blob in the specified range.
        x_ms_lease_id: Required if the blob has an active lease.
        x_ms_range_get_content_md5:
            Optional. When this header is set to true and specified together
            with the Range header, the service returns the MD5 hash for the
            range, as long as the range is less than or equal to 4 MB in size.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        # Build a GET against '/<container>/<blob>'; optional arguments are
        # passed through _str_or_none so unset ones stay None.
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
        request.headers = [
            ('x-ms-range', _str_or_none(x_ms_range)),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
            ('x-ms-range-get-content-md5',
             _str_or_none(x_ms_range_get_content_md5))
        ]
        request.query = [('snapshot', _str_or_none(snapshot))]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request, None)
        return _create_blob_result(response)
def get_blob_to_path(self, container_name, blob_name, file_path,
open_mode='wb', snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob to a file path, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
file_path: Path of file to write to.
open_mode: Mode to use when opening the file.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
with open(file_path, open_mode) as stream:
self.get_blob_to_file(container_name,
blob_name,
stream,
snapshot,
x_ms_lease_id,
progress_callback)
def get_blob_to_file(self, container_name, blob_name, stream,
snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob to a file/stream, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
stream: Opened file/stream to write to.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
props = self.get_blob_properties(container_name, blob_name)
blob_size = int(props['content-length'])
if blob_size < self._BLOB_MAX_DATA_SIZE:
if progress_callback:
progress_callback(0, blob_size)
data = self.get_blob(container_name,
blob_name,
snapshot,
x_ms_lease_id=x_ms_lease_id)
stream.write(data)
if progress_callback:
progress_callback(blob_size, blob_size)
else:
if progress_callback:
progress_callback(0, blob_size)
index = 0
while index < blob_size:
chunk_range = 'bytes={0}-{1}'.format(
index,
index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1)
data = self.get_blob(
container_name, blob_name, x_ms_range=chunk_range)
length = len(data)
index += length
if length > 0:
stream.write(data)
if progress_callback:
progress_callback(index, blob_size)
if length < self._BLOB_MAX_CHUNK_DATA_SIZE:
break
else:
break
def get_blob_to_bytes(self, container_name, blob_name, snapshot=None,
x_ms_lease_id=None, progress_callback=None):
'''
Downloads a blob as an array of bytes, with automatic chunking and
progress notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
stream = BytesIO()
self.get_blob_to_file(container_name,
blob_name,
stream,
snapshot,
x_ms_lease_id,
progress_callback)
return stream.getvalue()
def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8',
snapshot=None, x_ms_lease_id=None,
progress_callback=None):
'''
Downloads a blob as unicode text, with automatic chunking and progress
notifications.
container_name: Name of existing container.
blob_name: Name of existing blob.
text_encoding: Encoding to use when decoding the blob data.
snapshot:
Optional. The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
x_ms_lease_id: Required if the blob has an active lease.
progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text_encoding', text_encoding)
result = self.get_blob_to_bytes(container_name,
blob_name,
snapshot,
x_ms_lease_id,
progress_callback)
return result.decode(text_encoding)
    def get_blob_metadata(self, container_name, blob_name, snapshot=None,
                          x_ms_lease_id=None):
        '''
        Returns all user-defined metadata for the specified blob or snapshot.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        snapshot:
            Optional. The snapshot parameter is an opaque DateTime value that,
            when present, specifies the blob snapshot to retrieve.
        x_ms_lease_id: Required if the blob has an active lease.
        Returns a dict built from the response headers whose names start
        with 'x-ms-meta'.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        # '?comp=metadata' addresses the metadata sub-resource of the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'
        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
        request.query = [('snapshot', _str_or_none(snapshot))]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)
        # Only the user metadata headers ('x-ms-meta-*') are returned.
        return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta'])
    def set_blob_metadata(self, container_name, blob_name,
                          x_ms_meta_name_values=None, x_ms_lease_id=None):
        '''
        Sets user-defined metadata for the specified blob as one or more
        name-value pairs.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        x_ms_meta_name_values: Dict containing name and value pairs.
        x_ms_lease_id: Required if the blob has an active lease.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        # '?comp=metadata' addresses the metadata sub-resource of the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=metadata'
        # Note: the metadata dict is passed through as-is (not via
        # _str_or_none); the header helpers expand it into x-ms-meta-* pairs.
        request.headers = [
            ('x-ms-meta-name-values', x_ms_meta_name_values),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))
        ]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        self._perform_request(request)
    def lease_blob(self, container_name, blob_name, x_ms_lease_action,
                   x_ms_lease_id=None, x_ms_lease_duration=60,
                   x_ms_lease_break_period=None, x_ms_proposed_lease_id=None):
        '''
        Establishes and manages a one-minute lock on a blob for write
        operations.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        x_ms_lease_action:
            Required. Possible values: acquire|renew|release|break|change
        x_ms_lease_id: Required if the blob has an active lease.
        x_ms_lease_duration:
            Specifies the duration of the lease, in seconds, or negative one
            (-1) for a lease that never expires. A non-infinite lease can be
            between 15 and 60 seconds. A lease duration cannot be changed
            using renew or change. For backwards compatibility, the default is
            60, and the value is only used on an acquire operation.
        x_ms_lease_break_period:
            Optional. For a break operation, this is the proposed duration of
            seconds that the lease should continue before it is broken, between
            0 and 60 seconds. This break period is only used if it is shorter
            than the time remaining on the lease. If longer, the time remaining
            on the lease is used. A new lease will not be available before the
            break period has expired, but the lease may be held for longer than
            the break period. If this header does not appear with a break
            operation, a fixed-duration lease breaks after the remaining lease
            period elapses, and an infinite lease breaks immediately.
        x_ms_proposed_lease_id:
            Optional for acquire, required for change. Proposed lease ID, in a
            GUID string format.
        Returns a dict containing 'x-ms-lease-id' and 'x-ms-lease-time' from
        the response headers, when present.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('x_ms_lease_action', x_ms_lease_action)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        # '?comp=lease' addresses the lease sub-resource of the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=lease'
        request.headers = [
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
            ('x-ms-lease-action', _str_or_none(x_ms_lease_action)),
            # The duration header is only meaningful on 'acquire'; suppress it
            # for every other lease action so the default of 60 does not leak
            # into renew/release/break/change requests.
            ('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\
                if x_ms_lease_action == 'acquire' else None)),
            ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)),
            ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)),
        ]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)
        return _parse_response_for_dict_filter(
            response,
            filter=['x-ms-lease-id', 'x-ms-lease-time'])
    def snapshot_blob(self, container_name, blob_name,
                      x_ms_meta_name_values=None, if_modified_since=None,
                      if_unmodified_since=None, if_match=None,
                      if_none_match=None, x_ms_lease_id=None):
        '''
        Creates a read-only snapshot of a blob.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        x_ms_meta_name_values: Optional. Dict containing name and value pairs.
        if_modified_since: Optional. Datetime string.
        if_unmodified_since: Optional. DateTime string.
        if_match:
            Optional. snapshot the blob only if its ETag value matches the
            value specified.
        if_none_match: Optional. An ETag value
        x_ms_lease_id: Required if the blob has an active lease.
        Returns a dict containing 'x-ms-snapshot', 'etag' and 'last-modified'
        from the response headers, when present.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        # '?comp=snapshot' addresses the snapshot operation on the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=snapshot'
        # The metadata dict is passed through as-is; the header helpers expand
        # it into x-ms-meta-* pairs.
        request.headers = [
            ('x-ms-meta-name-values', x_ms_meta_name_values),
            ('If-Modified-Since', _str_or_none(if_modified_since)),
            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
            ('If-Match', _str_or_none(if_match)),
            ('If-None-Match', _str_or_none(if_none_match)),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))
        ]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)
        return _parse_response_for_dict_filter(
            response,
            filter=['x-ms-snapshot', 'etag', 'last-modified'])
    def copy_blob(self, container_name, blob_name, x_ms_copy_source,
                  x_ms_meta_name_values=None,
                  x_ms_source_if_modified_since=None,
                  x_ms_source_if_unmodified_since=None,
                  x_ms_source_if_match=None, x_ms_source_if_none_match=None,
                  if_modified_since=None, if_unmodified_since=None,
                  if_match=None, if_none_match=None, x_ms_lease_id=None,
                  x_ms_source_lease_id=None):
        '''
        Copies a blob to a destination within the storage account.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        x_ms_copy_source:
            URL up to 2 KB in length that specifies a blob. A source blob in
            the same account can be private, but a blob in another account
            must be public or accept credentials included in this URL, such as
            a Shared Access Signature. Examples:
            https://myaccount.blob.core.windows.net/mycontainer/myblob
            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
        x_ms_meta_name_values: Optional. Dict containing name and value pairs.
        x_ms_source_if_modified_since:
            Optional. A DateTime value. Specify this conditional header to
            copy the blob only if the source blob has been modified since the
            specified date/time.
        x_ms_source_if_unmodified_since:
            Optional. A DateTime value. Specify this conditional header to
            copy the blob only if the source blob has not been modified since
            the specified date/time.
        x_ms_source_if_match:
            Optional. An ETag value. Specify this conditional header to copy
            the source blob only if its ETag matches the value specified.
        x_ms_source_if_none_match:
            Optional. An ETag value. Specify this conditional header to copy
            the source blob only if its ETag does not match the value
            specified.
        if_modified_since: Optional. Datetime string.
        if_unmodified_since: Optional. DateTime string.
        if_match:
            Optional. Snapshot the blob only if its ETag value matches the
            value specified.
        if_none_match: Optional. An ETag value
        x_ms_lease_id: Required if the blob has an active lease.
        x_ms_source_lease_id:
            Optional. Specify this to perform the Copy Blob operation only if
            the lease ID given matches the active lease ID of the source blob.
        Returns a dict of the response headers.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('x_ms_copy_source', x_ms_copy_source)
        if x_ms_copy_source.startswith('/'):
            # Backwards compatibility for earlier versions of the SDK where
            # the copy source can be in the following formats:
            # - Blob in named container:
            #     /accountName/containerName/blobName
            # - Snapshot in named container:
            #     /accountName/containerName/blobName?snapshot=<DateTime>
            # - Blob in root container:
            #     /accountName/blobName
            # - Snapshot in root container:
            #     /accountName/blobName?snapshot=<DateTime>
            account, _, source =\
                x_ms_copy_source.partition('/')[2].partition('/')
            # Rebuild the relative path as an absolute URL as required by the
            # current x-ms-copy-source header format.
            x_ms_copy_source = self.protocol + '://' + \
                account + self.host_base + '/' + source
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
        # The metadata dict is passed through as-is; the header helpers expand
        # it into x-ms-meta-* pairs.
        request.headers = [
            ('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
            ('x-ms-meta-name-values', x_ms_meta_name_values),
            ('x-ms-source-if-modified-since',
             _str_or_none(x_ms_source_if_modified_since)),
            ('x-ms-source-if-unmodified-since',
             _str_or_none(x_ms_source_if_unmodified_since)),
            ('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),
            ('x-ms-source-if-none-match',
             _str_or_none(x_ms_source_if_none_match)),
            ('If-Modified-Since', _str_or_none(if_modified_since)),
            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
            ('If-Match', _str_or_none(if_match)),
            ('If-None-Match', _str_or_none(if_none_match)),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
            ('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))
        ]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        response = self._perform_request(request)
        return _parse_response_for_dict(response)
def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,
x_ms_lease_id=None):
'''
Aborts a pending copy_blob operation, and leaves a destination blob
with zero length and full metadata.
container_name: Name of destination container.
blob_name: Name of destination blob.
x_ms_copy_id:
Copy identifier provided in the x-ms-copy-id of the original
copy_blob operation.
x_ms_lease_id:
Required if the destination blob has an active infinite lease.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('x_ms_copy_id', x_ms_copy_id)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(container_name) + '/' + \
_str(blob_name) + '?comp=copy©id=' + \
_str(x_ms_copy_id)
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-copy-action', 'abort'),
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_blob_header(
request, self.account_name, self.account_key)
self._perform_request(request)
    def delete_blob(self, container_name, blob_name, snapshot=None,
                    x_ms_lease_id=None):
        '''
        Marks the specified blob or snapshot for deletion. The blob is later
        deleted during garbage collection.
        To mark a specific snapshot for deletion provide the date/time of the
        snapshot via the snapshot parameter.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        snapshot:
            Optional. The snapshot parameter is an opaque DateTime value that,
            when present, specifies the blob snapshot to delete.
        x_ms_lease_id: Required if the blob has an active lease.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
        # Targeting a snapshot (instead of the base blob) is done via query.
        request.query = [('snapshot', _str_or_none(snapshot))]
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        self._perform_request(request)
    def put_block(self, container_name, blob_name, block, blockid,
                  content_md5=None, x_ms_lease_id=None):
        '''
        Creates a new block to be committed as part of a blob.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        block: Content of the block.
        blockid:
            Required. A value that identifies the block. The string must be
            less than or equal to 64 bytes in size.
        content_md5:
            Optional. An MD5 hash of the block content. This hash is used to
            verify the integrity of the blob during transport. When this
            header is specified, the storage service checks the hash that has
            arrived with the one that was sent.
        x_ms_lease_id: Required if the blob has an active lease.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('block', block)
        _validate_not_none('blockid', blockid)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        # '?comp=block' addresses the block sub-resource of the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=block'
        request.headers = [
            ('Content-MD5', _str_or_none(content_md5)),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))
        ]
        # Block ids are sent base64-encoded in the query string.
        request.query = [('blockid', _encode_base64(_str_or_none(blockid)))]
        # The block content must be bytes; the helper enforces this.
        request.body = _get_request_body_bytes_only('block', block)
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        self._perform_request(request)
    def put_block_list(self, container_name, blob_name, block_list,
                       content_md5=None, x_ms_blob_cache_control=None,
                       x_ms_blob_content_type=None,
                       x_ms_blob_content_encoding=None,
                       x_ms_blob_content_language=None,
                       x_ms_blob_content_md5=None, x_ms_meta_name_values=None,
                       x_ms_lease_id=None):
        '''
        Writes a blob by specifying the list of block IDs that make up the
        blob. In order to be written as part of a blob, a block must have been
        successfully written to the server in a prior Put Block (REST API)
        operation.
        container_name: Name of existing container.
        blob_name: Name of existing blob.
        block_list: A str list containing the block ids.
        content_md5:
            Optional. An MD5 hash of the block content. This hash is used to
            verify the integrity of the blob during transport. When this header
            is specified, the storage service checks the hash that has arrived
            with the one that was sent.
        x_ms_blob_cache_control:
            Optional. Sets the blob's cache control. If specified, this
            property is stored with the blob and returned with a read request.
        x_ms_blob_content_type:
            Optional. Sets the blob's content type. If specified, this property
            is stored with the blob and returned with a read request.
        x_ms_blob_content_encoding:
            Optional. Sets the blob's content encoding. If specified, this
            property is stored with the blob and returned with a read request.
        x_ms_blob_content_language:
            Optional. Set the blob's content language. If specified, this
            property is stored with the blob and returned with a read request.
        x_ms_blob_content_md5:
            Optional. An MD5 hash of the blob content. Note that this hash is
            not validated, as the hashes for the individual blocks were
            validated when each was uploaded.
        x_ms_meta_name_values: Optional. Dict containing name and value pairs.
        x_ms_lease_id: Required if the blob has an active lease.
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('block_list', block_list)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        # '?comp=blocklist' addresses the block-list sub-resource of the blob.
        request.path = '/' + \
            _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'
        # The metadata dict is passed through as-is; the header helpers expand
        # it into x-ms-meta-* pairs.
        request.headers = [
            ('Content-MD5', _str_or_none(content_md5)),
            ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
            ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
            ('x-ms-blob-content-encoding',
             _str_or_none(x_ms_blob_content_encoding)),
            ('x-ms-blob-content-language',
             _str_or_none(x_ms_blob_content_language)),
            ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)),
            ('x-ms-meta-name-values', x_ms_meta_name_values),
            ('x-ms-lease-id', _str_or_none(x_ms_lease_id))
        ]
        # The request body is the block list serialized as XML.
        request.body = _get_request_body(
            _convert_block_list_to_xml(block_list))
        # Rewrites path/query when targeting the local storage emulator.
        request.path, request.query = _update_request_uri_query_local_storage(
            request, self.use_local_storage)
        # Applies standard storage headers and the account credentials.
        request.headers = _update_storage_blob_header(
            request, self.account_name, self.account_key)
        self._perform_request(request)
def get_block_list(self, container_name, blob_name, snapshot=None,
                   blocklisttype=None, x_ms_lease_id=None):
    '''
    Retrieves the list of blocks that have been uploaded as part of a
    block blob.

    container_name: Name of existing container.
    blob_name: Name of existing blob.
    snapshot:
        Optional. Datetime to determine the time to retrieve the blocks.
    blocklisttype:
        Specifies whether to return the list of committed blocks, the list
        of uncommitted blocks, or both lists together. Valid values are:
        committed, uncommitted, or all.
    x_ms_lease_id: Required if the blob has an active lease.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    # 'comp=blocklist' selects the Get Block List operation on the blob.
    request.path = '/' + \
        _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist'
    request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
    request.query = [
        ('snapshot', _str_or_none(snapshot)),
        ('blocklisttype', _str_or_none(blocklisttype))
    ]
    # Rewrites path/query (e.g. for the local storage emulator) and drops
    # query parameters whose value is None.
    request.path, request.query = _update_request_uri_query_local_storage(
        request, self.use_local_storage)
    # Adds date/version headers and the SharedKey authorization header.
    request.headers = _update_storage_blob_header(
        request, self.account_name, self.account_key)
    response = self._perform_request(request)
    # Parse the XML <BlockList> response into a BlobBlockList object.
    return _convert_response_to_block_list(response)
def put_page(self, container_name, blob_name, page, x_ms_range,
             x_ms_page_write, timeout=None, content_md5=None,
             x_ms_lease_id=None, x_ms_if_sequence_number_lte=None,
             x_ms_if_sequence_number_lt=None,
             x_ms_if_sequence_number_eq=None,
             if_modified_since=None, if_unmodified_since=None,
             if_match=None, if_none_match=None):
    '''
    Writes a range of pages to a page blob.

    container_name: Name of existing container.
    blob_name: Name of existing blob.
    page: Content of the page.
    x_ms_range:
        Required. Specifies the range of bytes to be written as a page.
        Both the start and end of the range must be specified. Must be in
        format: bytes=startByte-endByte. Given that pages must be aligned
        with 512-byte boundaries, the start offset must be a modulus of
        512 and the end offset must be a modulus of 512-1. Examples of
        valid byte ranges are 0-511, 512-1023, etc.
    x_ms_page_write:
        Required. You may specify one of the following options:
            update (lower case):
                Writes the bytes specified by the request body into the
                specified range. The Range and Content-Length headers must
                match to perform the update.
            clear (lower case):
                Clears the specified range and releases the space used in
                storage for that range. To clear a range, set the
                Content-Length header to zero, and the Range header to a
                value that indicates the range to clear, up to maximum
                blob size.
    timeout: the timeout parameter is expressed in seconds.
    content_md5:
        Optional. An MD5 hash of the page content. This hash is used to
        verify the integrity of the page during transport. When this header
        is specified, the storage service compares the hash of the content
        that has arrived with the header value that was sent. If the two
        hashes do not match, the operation will fail with error code 400
        (Bad Request).
    x_ms_lease_id: Required if the blob has an active lease.
    x_ms_if_sequence_number_lte:
        Optional. If the blob's sequence number is less than or equal to
        the specified value, the request proceeds; otherwise it fails.
    x_ms_if_sequence_number_lt:
        Optional. If the blob's sequence number is less than the specified
        value, the request proceeds; otherwise it fails.
    x_ms_if_sequence_number_eq:
        Optional. If the blob's sequence number is equal to the specified
        value, the request proceeds; otherwise it fails.
    if_modified_since:
        Optional. A DateTime value. Specify this conditional header to
        write the page only if the blob has been modified since the
        specified date/time. If the blob has not been modified, the Blob
        service fails.
    if_unmodified_since:
        Optional. A DateTime value. Specify this conditional header to
        write the page only if the blob has not been modified since the
        specified date/time. If the blob has been modified, the Blob
        service fails.
    if_match:
        Optional. An ETag value. Specify an ETag value for this conditional
        header to write the page only if the blob's ETag value matches the
        value specified. If the values do not match, the Blob service fails.
    if_none_match:
        Optional. An ETag value. Specify an ETag value for this conditional
        header to write the page only if the blob's ETag value does not
        match the value specified. If the values are identical, the Blob
        service fails.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('page', page)
    _validate_not_none('x_ms_range', x_ms_range)
    _validate_not_none('x_ms_page_write', x_ms_page_write)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    # 'comp=page' selects the Put Page operation on the blob.
    request.path = '/' + \
        _str(container_name) + '/' + _str(blob_name) + '?comp=page'
    request.headers = [
        ('x-ms-range', _str_or_none(x_ms_range)),
        ('Content-MD5', _str_or_none(content_md5)),
        ('x-ms-page-write', _str_or_none(x_ms_page_write)),
        ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
        # NOTE: the wire header for the '_lte' parameter is
        # 'x-ms-if-sequence-number-le' (no 't') per the service protocol.
        ('x-ms-if-sequence-number-le',
         _str_or_none(x_ms_if_sequence_number_lte)),
        ('x-ms-if-sequence-number-lt',
         _str_or_none(x_ms_if_sequence_number_lt)),
        ('x-ms-if-sequence-number-eq',
         _str_or_none(x_ms_if_sequence_number_eq)),
        ('If-Modified-Since', _str_or_none(if_modified_since)),
        ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
        ('If-Match', _str_or_none(if_match)),
        ('If-None-Match', _str_or_none(if_none_match))
    ]
    request.query = [('timeout', _int_or_none(timeout))]
    # Page content must be raw bytes; _get_request_body_bytes_only raises
    # for other types.
    request.body = _get_request_body_bytes_only('page', page)
    # Rewrite URI for local storage emulator and drop None query params.
    request.path, request.query = _update_request_uri_query_local_storage(
        request, self.use_local_storage)
    # Adds date/version headers and the SharedKey authorization header.
    request.headers = _update_storage_blob_header(
        request, self.account_name, self.account_key)
    self._perform_request(request)
def get_page_ranges(self, container_name, blob_name, snapshot=None,
                    range=None, x_ms_range=None, x_ms_lease_id=None):
    '''
    Retrieves the page ranges for a blob.

    container_name: Name of existing container.
    blob_name: Name of existing blob.
    snapshot:
        Optional. The snapshot parameter is an opaque DateTime value that,
        when present, specifies the blob snapshot to retrieve information
        from.
    range:
        Optional. Specifies the range of bytes over which to list ranges,
        inclusively. If omitted, then all ranges for the blob are returned.
    x_ms_range:
        Optional. Specifies the range of bytes to be written as a page.
        Both the start and end of the range must be specified. Must be in
        format: bytes=startByte-endByte. Given that pages must be aligned
        with 512-byte boundaries, the start offset must be a modulus of
        512 and the end offset must be a modulus of 512-1. Examples of
        valid byte ranges are 0-511, 512-1023, etc.
    x_ms_lease_id: Required if the blob has an active lease.
    '''
    # NOTE: the 'range' parameter shadows the builtin of the same name,
    # but it is part of the public signature and cannot be renamed.
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    # 'comp=pagelist' selects the Get Page Ranges operation on the blob.
    request.path = '/' + \
        _str(container_name) + '/' + _str(blob_name) + '?comp=pagelist'
    request.headers = [
        # Both the standard 'Range' and the storage-specific 'x-ms-range'
        # headers are accepted by the service; None values are dropped.
        ('Range', _str_or_none(range)),
        ('x-ms-range', _str_or_none(x_ms_range)),
        ('x-ms-lease-id', _str_or_none(x_ms_lease_id))
    ]
    request.query = [('snapshot', _str_or_none(snapshot))]
    # Rewrite URI for local storage emulator and drop None query params.
    request.path, request.query = _update_request_uri_query_local_storage(
        request, self.use_local_storage)
    # Adds date/version headers and the SharedKey authorization header.
    request.headers = _update_storage_blob_header(
        request, self.account_name, self.account_key)
    response = self._perform_request(request)
    # Parse the XML <PageList> response into a PageList of PageRanges.
    return _parse_simple_list(response, PageList, PageRange, "page_ranges")
| apache-2.0 |
russellb/nova | nova/virt/disk/api.py | 1 | 11309 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Copyright 2011, Piston Cloud Computing, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH public keys into the authorized_keys file.
"""
import json
import os
import tempfile
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.virt.disk import guestfs
from nova.virt.disk import loop
from nova.virt.disk import nbd
LOG = logging.getLogger(__name__)
disk_opts = [
cfg.StrOpt('injected_network_template',
default=utils.abspath('virt/interfaces.template'),
help='Template file for injected network'),
cfg.ListOpt('img_handlers',
default=['loop', 'nbd', 'guestfs'],
help='Order of methods used to mount disk images'),
# NOTE(yamahata): ListOpt won't work because the command may include a
# comma. For example:
#
# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to
# escape such commas.
#
cfg.MultiStrOpt('virt_mkfs',
default=[
'default=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s',
'windows='
'mkfs.ntfs --fast --label %(fs_label)s %(target)s',
# NOTE(yamahata): vfat case
#'windows=mkfs.vfat -n %(fs_label)s %(target)s',
],
help='mkfs commands for ephemeral device. '
'The format is <os_type>=<mkfs command>'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(disk_opts)
# Parse the 'virt_mkfs' flag entries ("<os_type>=<mkfs command>") into a
# lookup table keyed by os_type. The entry whose os_type is 'default'
# also becomes the fallback command for unknown OS types.
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None


for s in FLAGS.virt_mkfs:
    # NOTE(yamahata): mkfs command may includes '=' for its options.
    # So item.partition('=') doesn't work here
    os_type, mkfs_command = s.split('=', 1)
    if os_type:
        _MKFS_COMMAND[os_type] = mkfs_command
    if os_type == 'default':
        _DEFAULT_MKFS_COMMAND = mkfs_command
def mkfs(os_type, fs_label, target):
    """Format ``target`` with the mkfs command configured for ``os_type``.

    Looks up the command template registered for ``os_type`` (falling back
    to the 'default' entry); a no-op when no template is configured.

    :param os_type: OS type key into the virt_mkfs table (e.g. 'linux').
    :param fs_label: filesystem label substituted for %(fs_label)s.
    :param target: device/file path substituted for %(target)s.
    """
    template = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or ''
    # Substitute an explicit mapping rather than '% locals()' so the
    # template can only reference the documented placeholders.
    mkfs_command = template % {'os_type': os_type,
                               'fs_label': fs_label,
                               'target': target}
    if mkfs_command:
        utils.execute(*mkfs_command.split())
def extend(image, size):
    """Grow a disk image file to ``size`` bytes and expand its filesystem.

    A no-op when the image is already at least ``size`` bytes. After the
    qemu-img resize, the contained filesystem is checked and resized on a
    best-effort basis (exit codes are ignored, e.g. for non-ext images).
    """
    current_size = os.path.getsize(image)
    if current_size < size:
        utils.execute('qemu-img', 'resize', image, size)
        # NOTE(vish): attempts to resize filesystem
        utils.execute('e2fsck', '-fp', image, check_exit_code=False)
        utils.execute('resize2fs', image, check_exit_code=False)
def bind(src, target, instance_name):
    """Bind-mount a device node into an LXC instance's filesystem.

    Touches ``target``, bind-mounts ``src`` onto it, and whitelists the
    device's major:minor in the instance's cgroup devices controller.
    No-op when ``src`` is falsy.
    """
    if src:
        utils.execute('touch', target, run_as_root=True)
        utils.execute('mount', '-o', 'bind', src, target,
                      run_as_root=True)
        s = os.stat(src)
        # 'c major:minor rwm' — allow read/write/mknod on this char device.
        cgroup_info = "c %s:%s rwm" % (os.major(s.st_rdev),
                                       os.minor(s.st_rdev))
        cgroups_path = \
            "/sys/fs/cgroup/devices/sysdefault/libvirt/lxc/%s/devices.allow" \
            % instance_name
        # BUG FIX: the previous "echo > file" form does not go through a
        # shell, so '>' was passed to echo as a literal argument and the
        # cgroup allow file was never written. tee writes its stdin to the
        # named file itself, which works without shell redirection.
        utils.execute('tee', cgroups_path, process_input=cgroup_info,
                      run_as_root=True)
def unbind(target):
    """Undo bind(): unmount ``target`` if one was given (no-op otherwise)."""
    if not target:
        return
    utils.execute('umount', target, run_as_root=True)
class _DiskImage(object):
    """Provide operations on a disk image file.

    Wraps the configured mount backends (loop/nbd/guestfs) and tries them
    in order until one can mount the image.
    """

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Internal
        self._mkdir = False    # True when mount() created mount_dir itself
        self._mounter = None   # the backend that successfully mounted
        self._errors = []      # diagnostics collected from failed backends

        # As a performance tweak, don't bother trying to
        # directly loopback mount a cow image.
        self.handlers = FLAGS.img_handlers[:]
        if use_cow and 'loop' in self.handlers:
            self.handlers.remove('loop')
        if not self.handlers:
            raise exception.Error(_('no capable image handler configured'))

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        # Leading '' yields a leading separator, so the result starts
        # with '\n--\n' when any errors are present.
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
                return cls
        raise exception.Error(_("unknown disk image handler: %s" % mode))

    def mount(self):
        """Mount a disk image, using the object attributes.

        The first supported means provided by the mount classes is used.

        True, or False is returned and the 'errors' attribute
        contains any diagnostics.
        """
        if self._mounter:
            raise exception.Error(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self._mkdir = True

        try:
            # Try each configured backend in order; stop at the first one
            # that mounts, recording errors from the ones that did not.
            for h in self.handlers:
                mounter_cls = self._handler_class(h)
                mounter = mounter_cls(image=self.image,
                                      partition=self.partition,
                                      mount_dir=self.mount_dir)
                if mounter.do_mount():
                    self._mounter = mounter
                    break
                else:
                    LOG.debug(mounter.error)
                    self._errors.append(mounter.error)
        finally:
            # If every backend failed, clean up the temp dir we created.
            if not self._mounter:
                self.umount()  # rmdir
        return bool(self._mounter)

    def umount(self):
        """Unmount a disk image from the file system."""
        try:
            if self._mounter:
                self._mounter.do_umount()
        finally:
            # Only remove the mount dir when this object created it.
            if self._mkdir:
                os.rmdir(self.mount_dir)
# Public module functions
def inject_data(image, key=None, net=None, metadata=None,
                partition=None, use_cow=False):
    """Injects a ssh key and optionally net data into a disk image.

    it will mount the image as a fully partitioned disk and attempt to inject
    into the specified partition number.

    If partition is not specified it mounts the image as a single partition.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.Error(img.errors)
    try:
        inject_data_into_fs(img.mount_dir, key, net, metadata,
                            utils.execute)
    finally:
        img.umount()
def inject_files(image, files, partition=None, use_cow=False):
    """Injects arbitrary files into a disk image.

    files is an iterable of (path, contents) pairs; paths are interpreted
    relative to the image's filesystem root.
    """
    img = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not img.mount():
        raise exception.Error(img.errors)
    try:
        for path, contents in files:
            _inject_file_into_fs(img.mount_dir, path, contents)
    finally:
        img.umount()
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.
    """
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.Error(img.errors)
    except Exception, exn:
        # NOTE(review): deliberate best-effort — failures are logged and the
        # function falls through to return None; callers must handle a None
        # result.
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
def destroy_container(img):
"""Destroy the container once it terminates.
It will umount the container that is mounted,
and delete any linked devices.
LXC does not support qcow2 images yet.
"""
try:
if img:
img.umount()
except Exception, exn:
LOG.exception(_('Failed to remove container: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, execute):
    """Injects data into a filesystem already mounted by the caller.

    Virt connections can call this directly if they mount their fs
    in a different way to inject_data. Each payload is injected only
    when it is truthy, in the order: key, net, metadata.
    """
    injectors = (
        (key, _inject_key_into_fs),
        (net, _inject_net_into_fs),
        (metadata, _inject_metadata_into_fs),
    )
    for payload, injector in injectors:
        if payload:
            injector(payload, fs, execute=execute)
def _inject_file_into_fs(fs, path, contents):
    """Write ``contents`` to ``path``, interpreted relative to the fs root.

    Parent directories are created as needed; writes run as root so files
    inside the mounted guest image can be created.
    """
    target = os.path.join(fs, path.lstrip('/'))
    utils.execute('mkdir', '-p', os.path.dirname(target), run_as_root=True)
    utils.execute('tee', target, process_input=contents, run_as_root=True)
def _inject_metadata_into_fs(metadata, fs, execute=None):
    """Serialize metadata items into <fs>/meta.js as a JSON object.

    metadata is an iterable of objects with .key and .value attributes.
    """
    pairs = dict((item.key, item.value) for item in metadata)
    metadata_path = os.path.join(fs, "meta.js")
    utils.execute('tee', metadata_path,
                  process_input=json.dumps(pairs), run_as_root=True)
def _inject_key_into_fs(key, fs, execute=None):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject
    the key.
    """
    sshdir = os.path.join(fs, 'root', '.ssh')
    # Ensure the directory exists, is root-owned and private.
    for cmd in (('mkdir', '-p', sshdir),
                ('chown', 'root', sshdir),
                ('chmod', '700', sshdir)):
        utils.execute(*cmd, run_as_root=True)
    keyfile = os.path.join(sshdir, 'authorized_keys')
    key_data = '\n# The following ssh key was injected by Nova\n%s\n' \
        % key.strip()
    utils.execute('tee', '-a', keyfile,
                  process_input=key_data, run_as_root=True)
def _inject_net_into_fs(net, fs, execute=None):
    """Inject /etc/network/interfaces into the filesystem rooted at fs.

    net is the contents of /etc/network/interfaces.
    """
    # os.path.join accepts multiple components; no need to nest calls.
    netdir = os.path.join(fs, 'etc', 'network')
    utils.execute('mkdir', '-p', netdir, run_as_root=True)
    utils.execute('chown', 'root:root', netdir, run_as_root=True)
    # Pass the mode as a string for consistency with every other execute()
    # call in this module; the previous int 755 only worked because
    # execute() stringifies its arguments.
    utils.execute('chmod', '755', netdir, run_as_root=True)
    netfile = os.path.join(netdir, 'interfaces')
    utils.execute('tee', netfile, process_input=net, run_as_root=True)
| apache-2.0 |
ravwojdyla/incubator-beam | sdks/python/apache_beam/utils/profiler.py | 7 | 4624 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A profiler context manager based on cProfile.Profile objects."""
import cProfile
import logging
import os
import pstats
import StringIO
import tempfile
import time
import warnings
from threading import Timer
class Profile(object):
  """cProfile wrapper context for saving and logging profiler results."""

  # Sort key used when rendering aggregated stats.
  SORTBY = 'cumulative'

  def __init__(self, profile_id, profile_location=None, log_results=False,
               file_copy_fn=None):
    # profile_id: label used in log lines and in the dump file name.
    # profile_location: base directory (possibly remote) for profile dumps;
    #   a dump is written only when both this and file_copy_fn are set.
    # log_results: when True, log the pstats summary on exit.
    # file_copy_fn: callable(src, dest) that copies the local dump file to
    #   profile_location (e.g. a filesystem-aware copy helper).
    self.stats = None
    self.profile_id = str(profile_id)
    self.profile_location = profile_location
    self.log_results = log_results
    self.file_copy_fn = file_copy_fn

  def __enter__(self):
    logging.info('Start profiling: %s', self.profile_id)
    self.profile = cProfile.Profile()
    self.profile.enable()
    return self

  def __exit__(self, *args):
    self.profile.disable()
    logging.info('Stop profiling: %s', self.profile_id)

    if self.profile_location and self.file_copy_fn:
      # Timestamped destination keeps successive runs from overwriting
      # each other.
      dump_location = os.path.join(
          self.profile_location, 'profile',
          ('%s-%s' % (time.strftime('%Y-%m-%d_%H_%M_%S'), self.profile_id)))
      fd, filename = tempfile.mkstemp()
      # dump_stats writes by name; the mkstemp fd is closed (and the temp
      # file removed) after the copy completes.
      self.profile.dump_stats(filename)
      logging.info('Copying profiler data to: [%s]', dump_location)
      self.file_copy_fn(filename, dump_location)  # pylint: disable=protected-access
      os.close(fd)
      os.remove(filename)

    if self.log_results:
      # Render the stats into a string buffer so they land in the log
      # rather than on stdout.
      s = StringIO.StringIO()
      self.stats = pstats.Stats(
          self.profile, stream=s).sort_stats(Profile.SORTBY)
      self.stats.print_stats()
      logging.info('Profiler data: [%s]', s.getvalue())
class MemoryReporter(object):
  """A memory reporter that reports the memory usage and heap profile.

  Usage:::

    mr = MemoryReporter(interval_second=30.0)
    mr.start()
    while ...
      <do something>
      # this will report continuously with 30 seconds between reports.
    mr.stop()

  NOTE: A reporter with start() should always stop(), or the parent process can
  never finish.

  Or simply the following which does start() and stop():
    with MemoryReporter(interval_second=100):
      while ...
        <do some thing>

  Also it could report on demand without continuous reporting.::

    mr = MemoryReporter()  # default interval 60s but not started.
    <do something>
    mr.report_once()
  """

  def __init__(self, interval_second=60.0):
    # Robustness fix: initialize every attribute unconditionally so no code
    # path can touch an attribute that was never assigned (previously the
    # guppy ImportError path skipped _interval_second and _timer).
    self._interval_second = interval_second
    self._timer = None
    self._enabled = False

    # guppy might not have installed. http://pypi.python.org/pypi/guppy/0.1.10
    # The reporter can be set up only when guppy is installed (and guppy cannot
    # be added to the required packages in setup.py, since it's not available
    # in all platforms).
    try:
      from guppy import hpy  # pylint: disable=import-error
      self._hpy = hpy
    except ImportError:
      warnings.warn('guppy is not installed; MemoryReporter not available.')
      self._hpy = None

  def __enter__(self):
    self.start()
    return self

  def __exit__(self, *args):
    self.stop()

  def start(self):
    # No-op when already running or when guppy is unavailable.
    if self._enabled or not self._hpy:
      return
    self._enabled = True

    def report_with_interval():
      if not self._enabled:
        return
      self.report_once()
      # Each Timer fires once, so reschedule the next report here.
      self._timer = Timer(self._interval_second, report_with_interval)
      self._timer.start()

    # Kick off the first timer of the chain.
    self._timer = Timer(self._interval_second, report_with_interval)
    self._timer.start()

  def stop(self):
    if not self._enabled:
      return
    self._timer.cancel()
    self._enabled = False

  def report_once(self):
    # On-demand report; no-op when guppy is unavailable.
    if not self._hpy:
      return
    report_start_time = time.time()
    heap_profile = self._hpy().heap()
    logging.info('*** MemoryReport Heap:\n %s\n MemoryReport took %.1f seconds',
                 heap_profile, time.time() - report_start_time)
| apache-2.0 |
averagehat/scikit-bio | skbio/sequence/_rna.py | 3 | 10887 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._iupac_sequence import IUPACSequence
class RNA(IUPACSequence, NucleotideMixin):
"""Store RNA sequence data and optional associated metadata.
Only characters in the IUPAC RNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the RNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC RNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC RNA characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
complement_map
See Also
--------
DNA
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import RNA
>>> RNA('ACCGAAU')
RNA
-----------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 42.86%
-----------------------------
0 ACCGAAU
Convert lowercase characters to uppercase:
>>> RNA('AcCGaaU', lowercase=True)
RNA
-----------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 42.86%
-----------------------------
0 ACCGAAU
"""
@classproperty
@stable(as_of="0.4.0")
@overrides(NucleotideMixin)
def complement_map(cls):
    """Mapping of each IUPAC RNA character to its complement.

    Gap characters complement to themselves.
    """
    pairs = (('A', 'U'), ('U', 'A'), ('G', 'C'), ('C', 'G'),
             ('Y', 'R'), ('R', 'Y'), ('S', 'S'), ('W', 'W'),
             ('K', 'M'), ('M', 'K'), ('B', 'V'), ('D', 'H'),
             ('H', 'D'), ('V', 'B'), ('N', 'N'))
    comp_map = dict(pairs)
    for gap in cls.gap_chars:
        comp_map[gap] = gap
    return comp_map
@classproperty
@stable(as_of="0.4.0")
@overrides(IUPACSequence)
def nondegenerate_chars(cls):
    # The four unambiguous IUPAC RNA bases.
    return set("ACGU")
@classproperty
@stable(as_of="0.4.0")
@overrides(IUPACSequence)
def degenerate_map(cls):
    # Each IUPAC degenerate RNA character mapped to the set of
    # non-degenerate bases it can represent.
    return {
        "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
        "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
        "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
    }
@property
def _motifs(self):
    # Module-level motif registry; exposed to users via ``find_motifs``.
    return _motifs
@stable(as_of="0.4.0")
def translate(self, genetic_code=1, *args, **kwargs):
    """Translate RNA sequence into protein sequence.

    Parameters
    ----------
    genetic_code : int, GeneticCode, optional
        Genetic code to use in translation. If ``int``, used as a table ID
        to look up the corresponding NCBI genetic code.
    args : tuple
        Positional arguments accepted by ``GeneticCode.translate``.
    kwargs : dict
        Keyword arguments accepted by ``GeneticCode.translate``.

    Returns
    -------
    Protein
        Translated sequence.

    See Also
    --------
    GeneticCode.translate
    GeneticCode.from_ncbi
    translate_six_frames

    Notes
    -----
    RNA sequence's metadata are included in the translated protein
    sequence. Positional metadata are not included.

    Examples
    --------
    Translate RNA into protein using NCBI's standard genetic code (table ID
    1, the default genetic code in scikit-bio):

    >>> from skbio import RNA
    >>> rna = RNA('AUGCCACUUUAA')
    >>> rna.translate()
    Protein
    -----------------------------
    Stats:
        length: 4
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: True
    -----------------------------
    0 MPL*

    Translate the same RNA sequence using a different NCBI genetic code
    (table ID 3, the yeast mitochondrial code) and specify that translation
    must terminate at the first stop codon:

    >>> rna.translate(3, stop='require')
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 MPT

    """
    # Accept a plain NCBI table ID for convenience; normalize to a
    # GeneticCode object before delegating.
    if not isinstance(genetic_code, skbio.GeneticCode):
        genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
    return genetic_code.translate(self, *args, **kwargs)
@stable(as_of="0.4.0")
def translate_six_frames(self, genetic_code=1, *args, **kwargs):
    """Translate RNA into protein using six possible reading frames.

    The six possible reading frames are:

    * 1 (forward)
    * 2 (forward)
    * 3 (forward)
    * -1 (reverse)
    * -2 (reverse)
    * -3 (reverse)

    Translated sequences are yielded in this order.

    Parameters
    ----------
    genetic_code : int, GeneticCode, optional
        Genetic code to use in translation. If ``int``, used as a table ID
        to look up the corresponding NCBI genetic code.
    args : tuple
        Positional arguments accepted by
        ``GeneticCode.translate_six_frames``.
    kwargs : dict
        Keyword arguments accepted by ``GeneticCode.translate_six_frames``.

    Yields
    ------
    Protein
        Translated sequence in the current reading frame.

    See Also
    --------
    GeneticCode.translate_six_frames
    GeneticCode.from_ncbi
    translate

    Notes
    -----
    This method is faster than (and equivalent to) performing six
    independent translations using, for example:

    ``(seq.translate(reading_frame=rf)
    for rf in GeneticCode.reading_frames)``

    RNA sequence's metadata are included in each translated protein
    sequence. Positional metadata are not included.

    Examples
    --------
    Translate RNA into protein using the six possible reading frames and
    NCBI's standard genetic code (table ID 1, the default genetic code in
    scikit-bio):

    >>> from skbio import RNA
    >>> rna = RNA('AUGCCACUUUAA')
    >>> for protein in rna.translate_six_frames():
    ...     protein
    ...     print('')
    Protein
    -----------------------------
    Stats:
        length: 4
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: True
    -----------------------------
    0 MPL*
    <BLANKLINE>
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 CHF
    <BLANKLINE>
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 ATL
    <BLANKLINE>
    Protein
    -----------------------------
    Stats:
        length: 4
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 LKWH
    <BLANKLINE>
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: True
    -----------------------------
    0 *SG
    <BLANKLINE>
    Protein
    -----------------------------
    Stats:
        length: 3
        has gaps: False
        has degenerates: False
        has non-degenerates: True
        has stops: False
    -----------------------------
    0 KVA
    <BLANKLINE>

    """
    # Accept a plain NCBI table ID for convenience; normalize to a
    # GeneticCode object before delegating.
    if not isinstance(genetic_code, skbio.GeneticCode):
        genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
    return genetic_code.translate_six_frames(self, *args, **kwargs)
@overrides(IUPACSequence)
def _repr_stats(self):
    """Define custom statistics to display in the sequence's repr."""
    stats = super(RNA, self)._repr_stats()
    gc = '{:.2%}'.format(self.gc_content())
    return stats + [('GC-content', gc)]
# Copy the shared nucleotide motif registry and attach it to RNA so motif
# finders are available through ``RNA.find_motifs``.
_motifs = _parent_motifs.copy()

# Leave this at the bottom
_motifs.interpolate(RNA, "find_motifs")
| bsd-3-clause |
UQ-UQx/edx-platform_lti | lms/djangoapps/shoppingcart/migrations/0022_auto__add_field_registrationcoderedemption_course_enrollment__add_fiel.py | 110 | 18922 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'RegistrationCodeRedemption.course_enrollment'
db.add_column('shoppingcart_registrationcoderedemption', 'course_enrollment',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['student.CourseEnrollment'], null=True),
keep_default=False)
# Adding field 'PaidCourseRegistration.course_enrollment'
db.add_column('shoppingcart_paidcourseregistration', 'course_enrollment',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['student.CourseEnrollment'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'RegistrationCodeRedemption.course_enrollment'
db.delete_column('shoppingcart_registrationcoderedemption', 'course_enrollment_id')
# Deleting field 'PaidCourseRegistration.course_enrollment'
db.delete_column('shoppingcart_paidcourseregistration', 'course_enrollment_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 12, 19, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 12, 19, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 12, 19, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart'] | agpl-3.0 |
weimingtom/python-for-android | python-modules/twisted/twisted/names/authority.py | 49 | 11053 | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Authoritative resolvers.
"""
import os
import time
from twisted.names import dns
from twisted.internet import defer
from twisted.python import failure
import common
def getSerial(filename = '/tmp/twisted-names.serial'):
    """Return a monotonically increasing (across program runs) integer.

    State is stored in the given file. If it does not exist, it is
    created with rw-/---/--- permissions.

    The returned value is the current date (YYYYMMDD) followed by a
    two-digit per-day counter, so repeated calls on the same day yield
    increasing serials.
    """
    serial = time.strftime('%Y%m%d')  # date part of the serial: YYYYMMDD

    # Restrict permissions while (possibly) creating the state file:
    # umask 0177 clears group/other bits, leaving owner read/write only.
    o = os.umask(0177)
    try:
        if not os.path.exists(filename):
            f = file(filename, 'w')
            f.write(serial + ' 0')
            f.close()
    finally:
        os.umask(o)  # always restore the caller's umask

    serialFile = file(filename, 'r')
    lastSerial, ID = serialFile.readline().split()
    # Bump the per-day counter if we already issued a serial today,
    # otherwise restart the counter at 0 for the new date.
    ID = (lastSerial == serial) and (int(ID) + 1) or 0
    serialFile.close()
    serialFile = file(filename, 'w')
    serialFile.write('%s %d' % (serial, ID))
    serialFile.close()
    serial = serial + ('%02d' % (ID,))
    return serial
#class LookupCacherMixin(object):
# _cache = None
#
# def _lookup(self, name, cls, type, timeout = 10):
# if not self._cache:
# self._cache = {}
# self._meth = super(LookupCacherMixin, self)._lookup
#
# if self._cache.has_key((name, cls, type)):
# return self._cache[(name, cls, type)]
# else:
# r = self._meth(name, cls, type, timeout)
# self._cache[(name, cls, type)] = r
# return r
class FileAuthority(common.ResolverBase):
    """An Authority that is loaded from a file.

    Subclasses provide loadFile(), which must populate:
      - self.soa: (zone name, dns.Record_SOA) for the zone served, and
      - self.records: dict mapping lowercased names to lists of records.
    """

    # (zone name, SOA record) tuple -- set by loadFile()
    soa = None
    # {lowercased name: [record, ...]} -- set by loadFile()
    records = None

    def __init__(self, filename):
        common.ResolverBase.__init__(self)
        self.loadFile(filename)
        self._cache = {}

    def __setstate__(self, state):
        # Restore directly from the pickled dict (no re-parsing of the file).
        self.__dict__ = state
#        print 'setstate ', self.soa

    def _lookup(self, name, cls, type, timeout = None):
        """Resolve a query against the in-memory record table.

        Returns a Deferred firing (answers, authority, additional), or
        failing with AuthoritativeDomainError / DomainError.
        """
        cnames = []
        results = []
        authority = []
        additional = []
        # Fall back to this TTL for records that don't carry their own.
        default_ttl = max(self.soa[1].minimum, self.soa[1].expire)

        domain_records = self.records.get(name.lower())

        if domain_records:
            for record in domain_records:
                if record.ttl is not None:
                    ttl = record.ttl
                else:
                    ttl = default_ttl

                if record.TYPE == dns.NS and name.lower() != self.soa[0].lower():
                    # NS record belong to a child zone: this is a referral.  As
                    # NS records are authoritative in the child zone, ours here
                    # are not.  RFC 2181, section 6.1.
                    authority.append(
                        dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=False)
                    )
                elif record.TYPE == type or type == dns.ALL_RECORDS:
                    results.append(
                        dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True)
                    )
                if record.TYPE == dns.CNAME:
                    cnames.append(
                        dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True)
                    )
            # No direct answers: offer the CNAME chain instead.
            if not results:
                results = cnames

            # Glue: for NS/MX targets add A records to the additional
            # section; for CNAME targets add A records to the answers.
            for record in results + authority:
                section = {dns.NS: additional, dns.CNAME: results, dns.MX: additional}.get(record.type)
                if section is not None:
                    n = str(record.payload.name)
                    for rec in self.records.get(n.lower(), ()):
                        if rec.TYPE == dns.A:
                            section.append(
                                dns.RRHeader(n, dns.A, dns.IN, rec.ttl or default_ttl, rec, auth=True)
                            )

            if not results and not authority:
                # Empty response. Include SOA record to allow clients to cache
                # this response.  RFC 1034, sections 3.7 and 4.3.4, and RFC 2181
                # section 7.1.
                # NOTE(review): `ttl` here is whatever the last record in the
                # loop above set it to, not necessarily an SOA-derived TTL.
                authority.append(
                    dns.RRHeader(self.soa[0], dns.SOA, dns.IN, ttl, self.soa[1], auth=True)
                    )

            return defer.succeed((results, authority, additional))
        else:
            if name.lower().endswith(self.soa[0].lower()):
                # We are the authority and we didn't find it.  Goodbye.
                return defer.fail(failure.Failure(dns.AuthoritativeDomainError(name)))
            return defer.fail(failure.Failure(dns.DomainError(name)))

    def lookupZone(self, name, timeout = 10):
        """Perform a zone transfer (AXFR-style): SOA, all non-SOA records,
        then the SOA again to mark the end of the transfer."""
        if self.soa[0].lower() == name.lower():
            # Wee hee hee hooo yea
            default_ttl = max(self.soa[1].minimum, self.soa[1].expire)
            if self.soa[1].ttl is not None:
                soa_ttl = self.soa[1].ttl
            else:
                soa_ttl = default_ttl
            results = [dns.RRHeader(self.soa[0], dns.SOA, dns.IN, soa_ttl, self.soa[1], auth=True)]
            for (k, r) in self.records.items():
                for rec in r:
                    if rec.ttl is not None:
                        ttl = rec.ttl
                    else:
                        ttl = default_ttl
                    if rec.TYPE != dns.SOA:
                        results.append(dns.RRHeader(k, rec.TYPE, dns.IN, ttl, rec, auth=True))
            # Terminating SOA: a copy of the opening record.
            results.append(results[0])
            return defer.succeed((results, (), ()))
        return defer.fail(failure.Failure(dns.DomainError(name)))

    def _cbAllRecords(self, results):
        """Merge a list of (success, (ans, auth, add)) lookup results into
        single answer/authority/additional lists, skipping failures."""
        ans, auth, add = [], [], []
        for res in results:
            if res[0]:
                ans.extend(res[1][0])
                auth.extend(res[1][1])
                add.extend(res[1][2])
        return ans, auth, add
class PySourceAuthority(FileAuthority):
    """A FileAuthority that is built up from Python source code.

    The config file is executed as Python and must define a list named
    'zone' of (name, record) tuples, typically built with the record
    constructors exposed by setupConfigNamespace().
    """

    def loadFile(self, filename):
        """Execute *filename* and build self.records / self.soa from the
        'zone' list it defines."""
        g, l = self.setupConfigNamespace(), {}
        execfile(filename, g, l)
        if not l.has_key('zone'):
            raise ValueError, "No zone defined in " + filename

        self.records = {}
        for rr in l['zone']:
            if isinstance(rr[1], dns.Record_SOA):
                self.soa = rr
            self.records.setdefault(rr[0].lower(), []).append(rr[1])

    def wrapRecord(self, type):
        # Bind *type* so the config file can write e.g. A('name', ...) and
        # get back the (name, record) pair loadFile() expects.
        return lambda name, *arg, **kw: (name, type(*arg, **kw))

    def setupConfigNamespace(self):
        """Return a globals dict mapping short record names ('A', 'MX', ...)
        to constructors wrapping the corresponding dns.Record_* classes."""
        r = {}
        items = dns.__dict__.iterkeys()
        for record in [x for x in items if x.startswith('Record_')]:
            type = getattr(dns, record)
            f = self.wrapRecord(type)
            r[record[len('Record_'):]] = f
        return r
class BindAuthority(FileAuthority):
    """An Authority that loads BIND configuration files"""

    def loadFile(self, filename):
        """Read a BIND-style zone file and populate the record table."""
        self.origin = os.path.basename(filename) + '.' # XXX - this might suck
        lines = open(filename).readlines()
        lines = self.stripComments(lines)
        lines = self.collapseContinuations(lines)
        self.parseLines(lines)

    def stripComments(self, lines):
        """Strip whitespace and drop everything after ';' on each line."""
        return [
            a.find(';') == -1 and a or a[:a.find(';')] for a in [
                b.strip() for b in lines
            ]
        ]

    def collapseContinuations(self, lines):
        """Join '(' ... ')' continuation lines into single logical lines,
        then split each line into a token list (empty lines dropped)."""
        L = []
        state = 0  # 0: normal, 1: inside a parenthesized continuation
        for line in lines:
            if state == 0:
                if line.find('(') == -1:
                    L.append(line)
                else:
                    L.append(line[:line.find('(')])
                    state = 1
            else:
                if line.find(')') != -1:
                    L[-1] += ' ' + line[:line.find(')')]
                    state = 0
                else:
                    L[-1] += ' ' + line
        lines = L
        L = []
        for line in lines:
            L.append(line.split())
        return filter(None, L)

    def parseLines(self, lines):
        """Process tokenized lines: handle $-directives, delegate the rest
        to parseRecordLine()."""
        TTL = 60 * 60 * 3  # default TTL (3 hours) until a $TTL directive is seen
        ORIGIN = self.origin
        self.records = {}
        for (line, index) in zip(lines, range(len(lines))):
            if line[0] == '$TTL':
                TTL = dns.str2time(line[1])
            elif line[0] == '$ORIGIN':
                ORIGIN = line[1]
            elif line[0] == '$INCLUDE': # XXX - oh, fuck me
                raise NotImplementedError('$INCLUDE directive not implemented')
            elif line[0] == '$GENERATE':
                raise NotImplementedError('$GENERATE directive not implemented')
            else:
                self.parseRecordLine(ORIGIN, TTL, line)

    def addRecord(self, owner, ttl, type, domain, cls, rdata):
        """Qualify *domain* against *owner* if it is relative, then dispatch
        to the class_<cls> handler (only IN is implemented)."""
        if not domain.endswith('.'):
            domain = domain + '.' + owner
        else:
            domain = domain[:-1]  # absolute name: strip the trailing dot
        f = getattr(self, 'class_%s' % cls, None)
        if f:
            f(ttl, type, domain, rdata)
        else:
            raise NotImplementedError, "Record class %r not supported" % cls

    def class_IN(self, ttl, type, domain, rdata):
        """Instantiate dns.Record_<type> for an IN-class record and store it;
        an SOA record also becomes self.soa."""
        record = getattr(dns, 'Record_%s' % type, None)
        if record:
            r = record(*rdata)
            r.ttl = ttl
            self.records.setdefault(domain.lower(), []).append(r)
            print 'Adding IN Record', domain, ttl, r
            if type == 'SOA':
                self.soa = (domain, r)
        else:
            raise NotImplementedError, "Record type %r not supported" % type

    #
    # This file ends here.  Read no further.
    #
    def parseRecordLine(self, origin, ttl, line):
        """Parse one tokenized zone-file record line.

        Consumes, in order: an optional owner name ('@' means the origin),
        then optional class and TTL fields (accepted in either order),
        then the record type; the remaining tokens are the rdata.
        """
        MARKERS = dns.QUERY_CLASSES.values() + dns.QUERY_TYPES.values()
        cls = 'IN'
        owner = origin

        if line[0] == '@':
            line = line[1:]
            owner = origin
#            print 'default owner'
        elif not line[0].isdigit() and line[0] not in MARKERS:
            owner = line[0]
            line = line[1:]
#            print 'owner is ', owner

        if line[0].isdigit() or line[0] in MARKERS:
            # No explicit domain token: the owner we just read *is* the domain.
            domain = owner
            owner = origin
#            print 'woops, owner is ', owner, ' domain is ', domain
        else:
            domain = line[0]
            line = line[1:]
#            print 'domain is ', domain

        # Class and TTL may appear in either order; handle both.
        if line[0] in dns.QUERY_CLASSES.values():
            cls = line[0]
            line = line[1:]
#            print 'cls is ', cls
            if line[0].isdigit():
                ttl = int(line[0])
                line = line[1:]
#                print 'ttl is ', ttl
        elif line[0].isdigit():
            ttl = int(line[0])
            line = line[1:]
#            print 'ttl is ', ttl
            if line[0] in dns.QUERY_CLASSES.values():
                cls = line[0]
                line = line[1:]
#                print 'cls is ', cls

        type = line[0]
#        print 'type is ', type
        rdata = line[1:]
#        print 'rdata is ', rdata

        self.addRecord(owner, ttl, type, domain, cls, rdata)
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/idlelib/StackViewer.py | 10 | 4426 | import os
import sys
import linecache
import re
import tkinter as tk
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.ObjectBrowser import ObjectTreeItem, make_objecttreeitem
from idlelib.PyShell import PyShellFileList
def StackBrowser(root, flist=None, tb=None, top=None):
    """Open a window showing an expandable tree of the traceback's frames.

    If *top* is not given, a new Toplevel is created under *root*.
    """
    container = tk.Toplevel(root) if top is None else top
    canvas = ScrolledCanvas(container, bg="white", highlightthickness=0)
    canvas.frame.pack(expand=1, fill="both")
    tree_root = TreeNode(canvas.canvas, None, StackTreeItem(flist, tb))
    tree_root.expand()
class StackTreeItem(TreeItem):
    """Root tree item: the exception text, expanding to one child per frame."""

    def __init__(self, flist=None, tb=None):
        self.flist = flist
        self.stack = self.get_stack(tb)
        self.text = self.get_exception()

    def get_stack(self, tb):
        """Return [(frame, lineno), ...] for *tb* (default: sys.last_traceback)."""
        if tb is None:
            tb = sys.last_traceback
        # Skip a leading entry with no frame attached.
        if tb and tb.tb_frame is None:
            tb = tb.tb_next
        frames = []
        while tb is not None:
            frames.append((tb.tb_frame, tb.tb_lineno))
            tb = tb.tb_next
        return frames

    def get_exception(self):
        """Format 'ExcName: value' from sys.last_type / sys.last_value."""
        exc_type = sys.last_type
        exc_value = sys.last_value
        label = str(exc_type.__name__) if hasattr(exc_type, "__name__") else str(exc_type)
        if exc_value is not None:
            label = label + ": " + str(exc_value)
        return label

    def GetText(self):
        return self.text

    def GetSubList(self):
        return [FrameTreeItem(info, self.flist) for info in self.stack]
class FrameTreeItem(TreeItem):
    """Tree item for one (frame, lineno) traceback entry.

    Displays "module.func(...), line N: source" and expands to the frame's
    locals and globals namespaces.
    """

    def __init__(self, info, flist):
        self.info = info    # (frame object, line number)
        self.flist = flist  # file list used to open sources on double-click

    def GetText(self):
        """Return the one-line label for this frame."""
        frame, lineno = self.info
        try:
            modname = frame.f_globals["__name__"]
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are not silently swallowed. A missing __name__ yields "?".
            modname = "?"
        code = frame.f_code
        filename = code.co_filename
        funcname = code.co_name
        sourceline = linecache.getline(filename, lineno)
        sourceline = sourceline.strip()
        if funcname in ("?", "", None):
            item = "%s, line %d: %s" % (modname, lineno, sourceline)
        else:
            item = "%s.%s(...), line %d: %s" % (modname, funcname,
                                                lineno, sourceline)
        return item

    def GetSubList(self):
        """Return the <locals> and <globals> namespace items (merged frames
        get only one entry, since the dicts are identical)."""
        frame, lineno = self.info
        sublist = []
        if frame.f_globals is not frame.f_locals:
            item = VariablesTreeItem("<locals>", frame.f_locals, self.flist)
            sublist.append(item)
        item = VariablesTreeItem("<globals>", frame.f_globals, self.flist)
        sublist.append(item)
        return sublist

    def OnDoubleClick(self):
        """Open the frame's source file at the frame's line, if it exists."""
        if self.flist:
            frame, lineno = self.info
            filename = frame.f_code.co_filename
            if os.path.isfile(filename):
                self.flist.gotofileline(filename, lineno)
class VariablesTreeItem(ObjectTreeItem):
    """Tree item showing a namespace dict (<locals> or <globals>), one
    editable child item per variable."""

    def GetText(self):
        return self.labeltext

    def GetLabelText(self):
        return None

    def IsExpandable(self):
        return len(self.object) > 0

    def GetSubList(self):
        items = []
        for name in self.object.keys():
            try:
                value = self.object[name]
            except KeyError:
                continue  # entry disappeared while we were iterating

            def setfunction(value, key=name, object=self.object):
                # Default args bind the current name/dict for this child.
                object[key] = value

            items.append(make_objecttreeitem(name + " =", value, setfunction))
        return items

    def keys(self):  # unused, left for possible 3rd party use
        return list(self.object.keys())
def _stack_viewer(parent):
    """htest entry point: open a StackBrowser on a manufactured NameError."""
    root = tk.Tk()
    root.title("Test StackViewer")
    # Place the test window just below the htest parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    flist = PyShellFileList(root)
    try: # to obtain a traceback object
        intentional_name_error
    except NameError:
        exc_type, exc_value, exc_tb = sys.exc_info()

    # inject stack trace to sys
    sys.last_type = exc_type
    sys.last_value = exc_value
    sys.last_traceback = exc_tb

    StackBrowser(root, flist=flist, top=root, tb=exc_tb)

    # restore sys to original state
    del sys.last_type
    del sys.last_value
    del sys.last_traceback
# When run directly, display the human-verified test for this module.
if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    run(_stack_viewer)
| gpl-3.0 |
meganbkratz/acq4 | acq4/util/InterfaceCombo.py | 3 | 4622 | # -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from acq4.Manager import getManager
import acq4.pyqtgraph.parametertree as parametertree
import acq4.pyqtgraph.parametertree.parameterTypes as ptypes
### TODO: inherit from util/ComboBox instead.
class InterfaceCombo(QtGui.QComboBox):
    """
    ComboBox that displays a list of objects registered with the ACQ4 interface directory.
    This is used, for example, to allow the user to select from an up-to-date list of devices, modules, etc.
    """
    def __init__(self, parent=None, types=None):
        self.dir = getManager().interfaceDir
        self.interfaceMap = []   # list of (type, name), parallel to the combo's items
        self.preferred = None    # name to re-select whenever the list is rebuilt
        QtGui.QComboBox.__init__(self, parent)
        #QtCore.QObject.connect(self.dir, QtCore.SIGNAL('interfaceListChanged'), self.updateList)
        self.dir.sigInterfaceListChanged.connect(self.updateList)

        if types is not None:
            self.setTypes(types)

    def setTypes(self, types):
        """Set the interface type(s) to display (a string or list of strings)
        and rebuild the item list."""
        if isinstance(types, basestring):
            types = [types]
        self.types = types
        self.updateList()

    def updateList(self):
        """Rebuild the combo items from the interface directory, keeping the
        previously preferred selection if it is still available."""
        ints = self.dir.listInterfaces(self.types)
        self.interfaceMap = []
        objects = set()  # used to suppress duplicate entries for the same object
        try:
            preferred = self.preferredValue()
            current = self.currentText()
        except RuntimeError:
            return  # This happens when the combo has been deleted, but we are still receiving signals.

        try:
            # Block signals while rebuilding so intermediate changes are not emitted.
            self.blockSignals(True)
            self.clear()
            man = getManager()
            for typ,intList in ints.iteritems():
                for name in intList:
                    obj = man.getInterface(typ, name)
                    if obj in objects:
                        continue
                    objects.add(obj)
                    self.interfaceMap.append((typ, name))
                    self.addItem(name)
                    if name == preferred:
                        self.setCurrentIndex(self.count()-1)
        finally:
            self.blockSignals(False)

        # Emit a single change notification if the rebuild altered the selection.
        if self.currentText() != current:
            self.currentIndexChanged.emit(self.currentIndex())

    def preferredValue(self):
        ## return the value we would most like to have selected if available
        if self.preferred is not None:
            return self.preferred
        else:
            return self.currentText()

    def getSelectedObj(self):
        """Return the interface object for the current selection, or None if
        nothing is selected."""
        #if self.currentIndex() == 0:
            #return None
        if self.currentIndex() == -1:
            return None
        return self.dir.getInterface(*self.interfaceMap[self.currentIndex()])

    def currentText(self):
        # Coerce Qt's QString to a plain Python string.
        return str(QtGui.QComboBox.currentText(self))

    def setCurrentText(self, text):
        """Set the current item by name"""
        self.preferred = text
        index = self.findText(text)
        if index == -1:
            return
        self.setCurrentIndex(index)

    def setCurrentObject(self, obj):
        # Not implemented.
        pass

    def widgetGroupInterface(self):
        # (change signal, getter, setter) triple used by WidgetGroup state save/restore.
        return (self.currentIndexChanged, self.currentText, self.setCurrentText)
#class InterfaceParameterItem(ptypes.ListParameterItem):
#def makeWidget(self):
#w = InterfaceCombo(types=self.param.opts['interfaceTypes'])
#w.setMaximumHeight(20) ## set to match height of spin box and line edit
#w.sigChanged = w.currentIndexChanged
#w.value = self.value
#w.setValue = self.setValue
#self.widget = w
#return self.widget
class InterfaceParameter(ptypes.ListParameter):
    """Parametertree list parameter whose allowed values track the ACQ4
    interface directory; the list refreshes on sigInterfaceListChanged."""
    type = 'interface'
    itemClass = ptypes.ListParameterItem

    def __init__(self, **args):
        ptypes.ListParameter.__init__(self, **args)
        self.dir = getManager().interfaceDir
        self.dir.sigInterfaceListChanged.connect(self.updateList)
        self.updateList()

    def setOpts(self, **args):
        """Refresh the value list whenever 'interfaceTypes' changes."""
        ptypes.ListParameter.setOpts(self, **args)
        if 'interfaceTypes' in args:
            self.updateList()

    def updateList(self):
        """Rebuild the allowed-value list from the interface directory.

        listInterfaces() may return either a dict of {type: [names]} or a
        flat list of names; both forms are flattened to a tuple of names.
        """
        ints = self.dir.listInterfaces(self.opts['interfaceTypes'])
        if isinstance(ints, dict):
            interfaces = []
            for i in ints.itervalues():
                interfaces.extend(i)
        else:
            interfaces = ints

        #print "set limits:", ints
        self.setLimits(tuple(interfaces))
parametertree.registerParameterType('interface', InterfaceParameter, override=True) | mit |
FederatedAI/FATE | python/fate_flow/operation/job_tracker.py | 1 | 29929 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import copy
from typing import List
from fate_arch.common import EngineType, Party
from fate_arch.computing import ComputingEngine
from fate_arch.federation import FederationEngine
from fate_arch.storage import StorageEngine
from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads
from fate_arch.common.log import schedule_logger
from fate_flow.db.db_models import (DB, Job, TrackingMetric, TrackingOutputDataInfo,
ComponentSummary, MachineLearningModelInfo as MLModel)
from fate_flow.entity.metric import Metric, MetricMeta
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.pipelined_model import pipelined_model
from fate_arch import storage
from fate_flow.utils import model_utils, job_utils, data_utils
from fate_arch import session
from fate_flow.entity.types import RunParameters
class Tracker(object):
    """
    Tracker for Job/Task/Metric
    """
    # Default partition counts for the metric/data/view tables.
    METRIC_DATA_PARTITION = 48
    METRIC_LIST_PARTITION = 48
    JOB_VIEW_PARTITION = 8
    def __init__(self, job_id: str, role: str, party_id: int,
                 model_id: str = None,
                 model_version: str = None,
                 component_name: str = None,
                 component_module_name: str = None,
                 task_id: str = None,
                 task_version: int = None,
                 job_parameters: RunParameters = None
                 ):
        """Bind the tracker to one job/role/party; model and task ids are optional."""
        self.job_id = job_id
        self.role = role
        self.party_id = party_id
        self.model_id = model_id
        self.party_model_id = model_utils.gen_party_model_id(model_id=model_id, role=role, party_id=party_id)
        self.model_version = model_version
        # Only create a pipelined model accessor when both id and version are known.
        self.pipelined_model = None
        if self.party_model_id and self.model_version:
            self.pipelined_model = pipelined_model.PipelinedModel(model_id=self.party_model_id,
                                                                  model_version=self.model_version)
        # Fall back to the virtual (job-level) component when none is given.
        self.component_name = component_name if component_name else job_utils.job_virtual_component_name()
        self.module_name = component_module_name if component_module_name else job_utils.job_virtual_component_module_name()
        self.task_id = task_id
        self.task_version = task_version
        self.job_parameters = job_parameters
    def save_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[Metric], job_level=False):
        """Persist metric key/value pairs (data_type=1) for this component or job."""
        schedule_logger(self.job_id).info(
            'save job {} component {} on {} {} {} {} metric data'.format(self.job_id, self.component_name, self.role,
                                                                         self.party_id, metric_namespace, metric_name))
        kv = []
        for metric in metrics:
            kv.append((metric.key, metric.value))
        self.insert_metrics_into_db(metric_namespace, metric_name, 1, kv, job_level)
    def get_job_metric_data(self, metric_namespace: str, metric_name: str):
        """Read metric data recorded at job level."""
        return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=True)
    def get_metric_data(self, metric_namespace: str, metric_name: str):
        """Read metric data recorded at component level."""
        return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=False)
    @DB.connection_context()
    def read_metric_data(self, metric_namespace: str, metric_name: str, job_level=False):
        """Return a list of Metric objects for the given namespace/name (data_type=1)."""
        metrics = []
        for k, v in self.read_metrics_from_db(metric_namespace, metric_name, 1, job_level):
            metrics.append(Metric(key=k, value=v))
        return metrics
    def save_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta,
                         job_level: bool = False):
        """Persist metric metadata (data_type=0) for this component or job."""
        schedule_logger(self.job_id).info(
            'save job {} component {} on {} {} {} {} metric meta'.format(self.job_id, self.component_name, self.role,
                                                                         self.party_id, metric_namespace, metric_name))
        self.insert_metrics_into_db(metric_namespace, metric_name, 0, metric_meta.to_dict().items(), job_level)
    @DB.connection_context()
    def get_metric_meta(self, metric_namespace: str, metric_name: str, job_level: bool = False):
        """Reassemble a MetricMeta from stored key/value rows (data_type=0)."""
        kv = dict()
        for k, v in self.read_metrics_from_db(metric_namespace, metric_name, 0, job_level):
            kv[k] = v
        return MetricMeta(name=kv.get('name'), metric_type=kv.get('metric_type'), extra_metas=kv)
    def log_job_view(self, view_data: dict):
        """Persist job view data (data_type=2) at job level."""
        self.insert_metrics_into_db('job', 'job_view', 2, view_data.items(), job_level=True)
    @DB.connection_context()
    def get_job_view(self):
        """Return the stored job view data (data_type=2) as a dict."""
        view_data = {}
        for k, v in self.read_metrics_from_db('job', 'job_view', 2, job_level=True):
            view_data[k] = v
        return view_data
    def save_output_data(self, computing_table, output_storage_engine, output_storage_address: dict,
                         output_table_namespace=None, output_table_name=None):
        """Persist a computing table to the configured storage engine.

        Returns (namespace, name) of the saved table, or (None, None) when
        there is no table to save.
        """
        if computing_table:
            if not output_table_namespace or not output_table_name:
                output_table_namespace, output_table_name = data_utils.default_output_table_info(task_id=self.task_id, task_version=self.task_version)
            schedule_logger(self.job_id).info(
                'persisting the component output temporary table to {} {}'.format(output_table_namespace,
                                                                                  output_table_name))
            partitions = computing_table.partitions
            schedule_logger(self.job_id).info('output data table partitions is {}'.format(partitions))
            # Build the engine-specific storage address from the configured base address.
            address_dict = output_storage_address.copy()
            if output_storage_engine == StorageEngine.EGGROLL:
                address_dict.update({"name": output_table_name, "namespace": output_table_namespace, "storage_type": storage.EggRollStorageType.ROLLPAIR_LMDB})
            elif output_storage_engine == StorageEngine.STANDALONE:
                address_dict.update({"name": output_table_name, "namespace": output_table_namespace, "storage_type": storage.StandaloneStorageType.ROLLPAIR_LMDB})
            elif output_storage_engine == StorageEngine.HDFS:
                address_dict.update({"path": data_utils.default_output_fs_path(name=output_table_name, namespace=output_table_namespace, prefix=address_dict.get("path_prefix"))})
            else:
                raise RuntimeError(f"{output_storage_engine} storage is not supported")
            address = storage.StorageTableMeta.create_address(storage_engine=output_storage_engine, address_dict=address_dict)
            schema = {}
            # persistent table
            computing_table.save(address, schema=schema, partitions=partitions)
            # Keep up to the first 100 rows as a preview sample in the metadata.
            part_of_data = []
            part_of_limit = 100
            for k, v in computing_table.collect():
                part_of_data.append((k, v))
                part_of_limit -= 1
                if part_of_limit == 0:
                    break
            table_count = computing_table.count()
            table_meta = storage.StorageTableMeta(name=output_table_name, namespace=output_table_namespace, new=True)
            table_meta.address = address
            table_meta.partitions = computing_table.partitions
            table_meta.engine = output_storage_engine
            table_meta.type = storage.EggRollStorageType.ROLLPAIR_LMDB
            table_meta.schema = schema
            table_meta.part_of_data = part_of_data
            table_meta.count = table_count
            table_meta.create()
            return output_table_namespace, output_table_name
        else:
            schedule_logger(self.job_id).info('task id {} output data table is none'.format(self.task_id))
            return None, None
    def get_output_data_table(self, output_data_infos):
        """
        Get component output data table, will run in the task executor process
        :param output_data_infos:
        :return:
        """
        output_tables_meta = {}
        if output_data_infos:
            for output_data_info in output_data_infos:
                schedule_logger(self.job_id).info("Get task {} {} output table {} {}".format(output_data_info.f_task_id, output_data_info.f_task_version, output_data_info.f_table_namespace, output_data_info.f_table_name))
                data_table_meta = storage.StorageTableMeta(name=output_data_info.f_table_name, namespace=output_data_info.f_table_namespace)
                output_tables_meta[output_data_info.f_data_name] = data_table_meta
        return output_tables_meta
    def init_pipelined_model(self):
        """Create the on-disk structure for the pipelined model."""
        self.pipelined_model.create_pipelined_model()
    def save_output_model(self, model_buffers: dict, model_alias: str):
        """Save component model protobuf buffers under the given alias (no-op if empty)."""
        if model_buffers:
            self.pipelined_model.save_component_model(component_name=self.component_name,
                                                      component_module_name=self.module_name,
                                                      model_alias=model_alias,
                                                      model_buffers=model_buffers)
    def get_output_model(self, model_alias):
        """Read this component's model buffers for the given alias."""
        model_buffers = self.pipelined_model.read_component_model(component_name=self.component_name,
                                                                  model_alias=model_alias)
        return model_buffers
    def collect_model(self):
        """Collect all model buffers stored in the pipelined model."""
        model_buffers = self.pipelined_model.collect_models()
        return model_buffers
    def save_pipelined_model(self, pipelined_buffer_object):
        """Save the pipeline object both as an output model and as the pipeline definition."""
        self.save_output_model({'Pipeline': pipelined_buffer_object}, 'pipeline')
        self.pipelined_model.save_pipeline(pipelined_buffer_object=pipelined_buffer_object)
    def get_component_define(self):
        """Return this component's definition from the pipelined model."""
        return self.pipelined_model.get_component_define(component_name=self.component_name)
    @DB.connection_context()
    def insert_metrics_into_db(self, metric_namespace: str, metric_name: str, data_type: int, kv, job_level=False):
        """Bulk-insert metric rows.

        data_type: 0 = metric meta, 1 = metric data, 2 = job view (see callers).
        Exceptions are logged, not raised.
        """
        try:
            tracking_metric = self.get_dynamic_db_model(TrackingMetric, self.job_id)()
            tracking_metric.f_job_id = self.job_id
            tracking_metric.f_component_name = (self.component_name if not job_level else job_utils.job_virtual_component_name())
            tracking_metric.f_task_id = self.task_id
            tracking_metric.f_task_version = self.task_version
            tracking_metric.f_role = self.role
            tracking_metric.f_party_id = self.party_id
            tracking_metric.f_metric_namespace = metric_namespace
            tracking_metric.f_metric_name = metric_name
            tracking_metric.f_type = data_type
            # Use the populated row as a template; each (k, v) gets its own copy.
            default_db_source = tracking_metric.to_json()
            tracking_metric_data_source = []
            for k, v in kv:
                db_source = default_db_source.copy()
                db_source['f_key'] = serialize_b64(k)
                db_source['f_value'] = serialize_b64(v)
                db_source['f_create_time'] = current_timestamp()
                tracking_metric_data_source.append(db_source)
            self.bulk_insert_into_db(self.get_dynamic_db_model(TrackingMetric, self.job_id),
                                     tracking_metric_data_source)
        except Exception as e:
            schedule_logger(self.job_id).exception("An exception where inserted metric {} of metric namespace: {} to database:\n{}".format(
                metric_name,
                metric_namespace,
                e
            ))
    @DB.connection_context()
    def insert_summary_into_db(self, summary_data: dict):
        """Create or update this task's ComponentSummary row. Exceptions are logged, not raised."""
        try:
            summary_model = self.get_dynamic_db_model(ComponentSummary, self.job_id)
            DB.create_tables([summary_model])
            summary_obj = summary_model.get_or_none(
                summary_model.f_job_id == self.job_id,
                summary_model.f_component_name == self.component_name,
                summary_model.f_role == self.role,
                summary_model.f_party_id == self.party_id,
                summary_model.f_task_id == self.task_id,
                summary_model.f_task_version == self.task_version
            )
            if summary_obj:
                summary_obj.f_summary = serialize_b64(summary_data, to_str=True)
                summary_obj.f_update_time = current_timestamp()
                summary_obj.save()
            else:
                self.get_dynamic_db_model(ComponentSummary, self.job_id).create(
                    f_job_id=self.job_id,
                    f_component_name=self.component_name,
                    f_role=self.role,
                    f_party_id=self.party_id,
                    f_task_id=self.task_id,
                    f_task_version=self.task_version,
                    f_summary=serialize_b64(summary_data, to_str=True),
                    f_create_time=current_timestamp()
                )
        except Exception as e:
            schedule_logger(self.job_id).exception("An exception where querying summary job id: {} "
                                                   "component name: {} to database:\n{}".format(
                self.job_id, self.component_name, e)
            )
    @DB.connection_context()
    def read_summary_from_db(self):
        """Return the deserialized component summary, or '' when none is stored."""
        try:
            summary_model = self.get_dynamic_db_model(ComponentSummary, self.job_id)
            summary = summary_model.get_or_none(
                summary_model.f_job_id == self.job_id,
                summary_model.f_component_name == self.component_name,
                summary_model.f_role == self.role,
                summary_model.f_party_id == self.party_id
            )
            if summary:
                cpn_summary = deserialize_b64(summary.f_summary)
            else:
                cpn_summary = ""
        except Exception as e:
            schedule_logger(self.job_id).exception(e)
            raise e
        return cpn_summary
    def log_output_data_info(self, data_name: str, table_namespace: str, table_name: str):
        """Record where a named output of this task was stored."""
        self.insert_output_data_info_into_db(data_name=data_name, table_namespace=table_namespace, table_name=table_name)
    @DB.connection_context()
    def insert_output_data_info_into_db(self, data_name: str, table_namespace: str, table_name: str):
        """Insert one TrackingOutputDataInfo row. Exceptions are logged, not raised."""
        try:
            tracking_output_data_info = self.get_dynamic_db_model(TrackingOutputDataInfo, self.job_id)()
            tracking_output_data_info.f_job_id = self.job_id
            tracking_output_data_info.f_component_name = self.component_name
            tracking_output_data_info.f_task_id = self.task_id
            tracking_output_data_info.f_task_version = self.task_version
            tracking_output_data_info.f_data_name = data_name
            tracking_output_data_info.f_role = self.role
            tracking_output_data_info.f_party_id = self.party_id
            tracking_output_data_info.f_table_namespace = table_namespace
            tracking_output_data_info.f_table_name = table_name
            tracking_output_data_info.f_create_time = current_timestamp()
            self.bulk_insert_into_db(self.get_dynamic_db_model(TrackingOutputDataInfo, self.job_id),
                                     [tracking_output_data_info.to_json()])
        except Exception as e:
            schedule_logger(self.job_id).exception("An exception where inserted output data info {} {} {} to database:\n{}".format(
                data_name,
                table_namespace,
                table_name,
                e
            ))
    @DB.connection_context()
    def bulk_insert_into_db(self, model, data_source):
        """Insert rows in batches; return the number of rows handed to the DB, or 0 on error."""
        try:
            try:
                DB.create_tables([model])
            except Exception as e:
                schedule_logger(self.job_id).exception(e)
            # Smaller batches for the local (SQLite-style) database backend.
            batch_size = 50 if RuntimeConfig.USE_LOCAL_DATABASE else 1000
            for i in range(0, len(data_source), batch_size):
                with DB.atomic():
                    model.insert_many(data_source[i:i+batch_size]).execute()
            return len(data_source)
        except Exception as e:
            schedule_logger(self.job_id).exception(e)
            return 0
    def save_as_table(self, computing_table, name, namespace):
        """Persist a computing table under an explicit name/namespace using the job's storage config."""
        self.save_output_data(computing_table=computing_table,
                              output_storage_engine=self.job_parameters.storage_engine,
                              output_storage_address=self.job_parameters.engines_address.get(EngineType.STORAGE, {}),
                              output_table_namespace=namespace, output_table_name=name)
    @DB.connection_context()
    def read_metrics_from_db(self, metric_namespace: str, metric_name: str, data_type, job_level=False):
        """Yield deserialized (key, value) pairs for matching metric rows.

        NOTE: this is a generator (it yields); the trailing ``return metrics``
        only terminates iteration and never delivers the empty list to callers.
        """
        metrics = []
        try:
            tracking_metric_model = self.get_dynamic_db_model(TrackingMetric, self.job_id)
            tracking_metrics = tracking_metric_model.select(tracking_metric_model.f_key, tracking_metric_model.f_value).where(
                tracking_metric_model.f_job_id == self.job_id,
                tracking_metric_model.f_component_name == (self.component_name if not job_level else job_utils.job_virtual_component_name()),
                tracking_metric_model.f_role == self.role,
                tracking_metric_model.f_party_id == self.party_id,
                tracking_metric_model.f_metric_namespace == metric_namespace,
                tracking_metric_model.f_metric_name == metric_name,
                tracking_metric_model.f_type == data_type
            )
            for tracking_metric in tracking_metrics:
                yield deserialize_b64(tracking_metric.f_key), deserialize_b64(tracking_metric.f_value)
        except Exception as e:
            schedule_logger(self.job_id).exception(e)
            raise e
        return metrics
    @DB.connection_context()
    def clean_metrics(self):
        """Delete all metric rows of this task version; return True if any were deleted."""
        tracking_metric_model = self.get_dynamic_db_model(TrackingMetric, self.job_id)
        operate = tracking_metric_model.delete().where(
            tracking_metric_model.f_task_id==self.task_id,
            tracking_metric_model.f_task_version==self.task_version,
            tracking_metric_model.f_role==self.role,
            tracking_metric_model.f_party_id==self.party_id
        )
        return operate.execute() > 0
    @DB.connection_context()
    def get_metric_list(self, job_level: bool = False):
        """Return {metric_namespace: [metric_name, ...]} for this component (or 'dag' at job level)."""
        metrics = dict()
        tracking_metric_model = self.get_dynamic_db_model(TrackingMetric, self.job_id)
        tracking_metrics = tracking_metric_model.select(tracking_metric_model.f_metric_namespace, tracking_metric_model.f_metric_name).where(
            tracking_metric_model.f_job_id==self.job_id,
            tracking_metric_model.f_component_name==(self.component_name if not job_level else 'dag'),
            tracking_metric_model.f_role==self.role,
            tracking_metric_model.f_party_id==self.party_id).distinct()
        for tracking_metric in tracking_metrics:
            metrics[tracking_metric.f_metric_namespace] = metrics.get(tracking_metric.f_metric_namespace, [])
            metrics[tracking_metric.f_metric_namespace].append(tracking_metric.f_metric_name)
        return metrics
    def get_output_data_info(self, data_name=None):
        """Return output data info rows for this component, optionally for one data name."""
        return self.read_output_data_info_from_db(data_name=data_name)
    def read_output_data_info_from_db(self, data_name=None):
        """Build the filter dict for this tracker's identity and query output data infos."""
        filter_dict = {}
        filter_dict["job_id"] = self.job_id
        filter_dict["component_name"] = self.component_name
        filter_dict["role"] = self.role
        filter_dict["party_id"] = self.party_id
        if data_name:
            filter_dict["data_name"] = data_name
        return self.query_output_data_infos(**filter_dict)
    @classmethod
    @DB.connection_context()
    def query_output_data_infos(cls, **kwargs):
        """Query TrackingOutputDataInfo rows matching the given f_* fields."""
        tracking_output_data_info_model = cls.get_dynamic_db_model(TrackingOutputDataInfo, kwargs.get("job_id"))
        filters = []
        # Only keyword args that map to an existing f_<name> column become filters.
        for f_n, f_v in kwargs.items():
            attr_name = 'f_%s' % f_n
            if hasattr(tracking_output_data_info_model, attr_name):
                filters.append(operator.attrgetter('f_%s' % f_n)(tracking_output_data_info_model) == f_v)
        if filters:
            output_data_infos_tmp = tracking_output_data_info_model.select().where(*filters)
        else:
            output_data_infos_tmp = tracking_output_data_info_model.select()
        output_data_infos_group = {}
        # Only the latest version of the task output data is retrieved
        for output_data_info in output_data_infos_tmp:
            group_key = cls.get_output_data_group_key(output_data_info.f_task_id, output_data_info.f_data_name)
            if group_key not in output_data_infos_group:
                output_data_infos_group[group_key] = output_data_info
            elif output_data_info.f_task_version > output_data_infos_group[group_key].f_task_version:
                output_data_infos_group[group_key] = output_data_info
        return output_data_infos_group.values()
    @classmethod
    def get_output_data_group_key(cls, task_id, data_name):
        """Grouping key used to deduplicate output data rows per task/data name."""
        return task_id + data_name
    def clean_task(self, runtime_conf):
        """Clean up this task's temporary computing tables and federation resources.

        Returns True on success, False when any step raised (the error is logged).
        """
        schedule_logger(self.job_id).info('clean task {} {} on {} {}'.format(self.task_id,
                                                                             self.task_version,
                                                                             self.role,
                                                                             self.party_id))
        try:
            sess = session.Session(computing_type=self.job_parameters.computing_engine, federation_type=self.job_parameters.federation_engine)
            # clean up temporary tables
            computing_temp_namespace = job_utils.generate_session_id(task_id=self.task_id,
                                                                     task_version=self.task_version,
                                                                     role=self.role,
                                                                     party_id=self.party_id)
            if self.job_parameters.computing_engine == ComputingEngine.EGGROLL:
                session_options = {"eggroll.session.processors.per.node": 1}
            else:
                session_options = {}
            sess.init_computing(computing_session_id=f"{computing_temp_namespace}_clean", options=session_options)
            sess.computing.cleanup(namespace=computing_temp_namespace, name="*")
            schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(computing_temp_namespace,
                                                                                                 self.role,
                                                                                                 self.party_id))
            # clean up the last tables of the federation
            federation_temp_namespace = job_utils.generate_task_version_id(self.task_id, self.task_version)
            sess.computing.cleanup(namespace=federation_temp_namespace, name="*")
            schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(federation_temp_namespace,
                                                                                                 self.role,
                                                                                                 self.party_id))
            sess.computing.stop()
            if self.job_parameters.federation_engine == FederationEngine.RABBITMQ and self.role != "local":
                schedule_logger(self.job_id).info('rabbitmq start clean up')
                parties = [Party(k, p) for k, v in runtime_conf['role'].items() for p in v]
                federation_session_id = job_utils.generate_task_version_id(self.task_id, self.task_version)
                component_parameters_on_party = copy.deepcopy(runtime_conf)
                component_parameters_on_party["local"] = {"role": self.role, "party_id": self.party_id}
                sess.init_federation(federation_session_id=federation_session_id,
                                     runtime_conf=component_parameters_on_party,
                                     service_conf=self.job_parameters.engines_address.get(EngineType.FEDERATION, {}))
                sess._federation_session.cleanup(parties)
                schedule_logger(self.job_id).info('rabbitmq clean up success')
            #TODO optimize the clean process
            if self.job_parameters.federation_engine == FederationEngine.PULSAR and self.role != "local":
                schedule_logger(self.job_id).info('start to clean up pulsar topics')
                parties = [Party(k, p) for k, v in runtime_conf['role'].items() for p in v]
                federation_session_id = job_utils.generate_task_version_id(self.task_id, self.task_version)
                component_parameters_on_party = copy.deepcopy(runtime_conf)
                component_parameters_on_party["local"] = {"role": self.role, "party_id": self.party_id}
                sess.init_federation(federation_session_id=federation_session_id,
                                     runtime_conf=component_parameters_on_party,
                                     service_conf=self.job_parameters.engines_address.get(EngineType.FEDERATION, {}))
                sess._federation_session.cleanup(parties)
                schedule_logger(self.job_id).info('pulsar topic clean up success')
            return True
        except Exception as e:
            schedule_logger(self.job_id).exception(e)
            return False
    @DB.connection_context()
    def save_machine_learning_model_info(self):
        """Record model info in the MLModel table once per (job, role, model, party).

        No-op when a matching row already exists or no Job row is found.
        Exceptions are logged, not raised.
        """
        try:
            record = MLModel.get_or_none(MLModel.f_model_version == self.job_id,
                                         MLModel.f_role == self.role,
                                         MLModel.f_model_id == self.model_id,
                                         MLModel.f_party_id == self.party_id)
            if not record:
                job = Job.get_or_none(Job.f_job_id == self.job_id)
                pipeline = self.pipelined_model.read_component_model('pipeline', 'pipeline')['Pipeline']
                if job:
                    job_data = job.to_json()
                    model_info = {
                        'job_id': job_data.get("f_job_id"),
                        'role': self.role,
                        'party_id': self.party_id,
                        'roles': job_data.get("f_roles"),
                        'model_id': self.model_id,
                        'model_version': self.model_version,
                        'initiator_role': job_data.get('f_initiator_role'),
                        'initiator_party_id': job_data.get('f_initiator_party_id'),
                        'runtime_conf': job_data.get('f_runtime_conf'),
                        'work_mode': job_data.get('f_work_mode'),
                        'train_dsl': job_data.get('f_dsl'),
                        'train_runtime_conf': job_data.get('f_train_runtime_conf'),
                        'size': self.get_model_size(),
                        'job_status': job_data.get('f_status'),
                        'parent': False if json_loads(pipeline.inference_dsl) else True,
                        'fate_version': pipeline.fate_version,
                        'runtime_conf_on_party': json_loads(pipeline.runtime_conf_on_party),
                        'parent_info': json_loads(pipeline.parent_info),
                        'inference_dsl': json_loads(pipeline.inference_dsl)
                    }
                    model_utils.save_model_info(model_info)
                    schedule_logger(self.job_id).info(
                        'save {} model info done. model id: {}, model version: {}.'.format(self.job_id,
                                                                                           self.model_id,
                                                                                           self.model_version))
                else:
                    schedule_logger(self.job_id).info(
                        'save {} model info failed, no job found in db. '
                        'model id: {}, model version: {}.'.format(self.job_id,
                                                                  self.model_id,
                                                                  self.model_version))
            else:
                schedule_logger(self.job_id).info('model {} info has already existed in database.'.format(self.job_id))
        except Exception as e:
            schedule_logger(self.job_id).exception(e)
    @classmethod
    def get_dynamic_db_model(cls, base, job_id):
        """Return the per-shard peewee model class for ``base`` keyed by job id prefix."""
        return type(base.model(table_index=cls.get_dynamic_tracking_table_index(job_id=job_id)))
    @classmethod
    def get_dynamic_tracking_table_index(cls, job_id):
        """Table shard index: the first 8 characters of the job id."""
        return job_id[:8]
    def get_model_size(self):
        """Return the total on-disk size of the pipelined model files."""
        return self.pipelined_model.calculate_model_file_size()
| apache-2.0 |
matejcik/weblate | weblate/trans/views/api.py | 1 | 10715 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import re
import threading
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import (
HttpResponse, HttpResponseNotAllowed, HttpResponseBadRequest,
JsonResponse,
)
from weblate import appsettings
from weblate.trans.models import SubProject
from weblate.trans.views.helper import get_project, get_subproject
from weblate.trans.site import get_site_url
from weblate.logger import LOGGER
# URL templates used to match a hook payload against SubProject.repo values.
# %(owner)s/%(slug)s come from old-style Bitbucket payloads; %(full_name)s from
# new-style webhooks.
BITBUCKET_GIT_REPOS = (
    'ssh://git@bitbucket.org/%(owner)s/%(slug)s.git',
    'git@bitbucket.org:%(owner)s/%(slug)s.git',
    'https://bitbucket.org/%(owner)s/%(slug)s.git',
)
BITBUCKET_HG_REPOS = (
    'https://bitbucket.org/%(owner)s/%(slug)s',
    'ssh://hg@bitbucket.org/%(owner)s/%(slug)s',
    'hg::ssh://hg@bitbucket.org/%(owner)s/%(slug)s',
    'hg::https://bitbucket.org/%(owner)s/%(slug)s',
)
# New-style webhook payloads do not carry the SCM type, so both git and hg
# URL forms are listed together.
BITBUCKET_REPOS = (
    'ssh://git@bitbucket.org/%(full_name)s.git',
    'git@bitbucket.org:%(full_name)s.git',
    'https://bitbucket.org/%(full_name)s.git',
    'https://bitbucket.org/%(full_name)s',
    'ssh://hg@bitbucket.org/%(full_name)s',
    'hg::ssh://hg@bitbucket.org/%(full_name)s',
    'hg::https://bitbucket.org/%(full_name)s',
)
GITHUB_REPOS = (
    'git://github.com/%(owner)s/%(slug)s.git',
    'https://github.com/%(owner)s/%(slug)s.git',
    'https://github.com/%(owner)s/%(slug)s',
    'git@github.com:%(owner)s/%(slug)s.git',
)
# Registry of service name -> payload handler, filled by @register_hook.
HOOK_HANDLERS = {}
def hook_response(response='Update triggered', status='success'):
    """Build the standard JSON reply returned by hook endpoints."""
    payload = {'status': status, 'message': response}
    return JsonResponse(data=payload)
def register_hook(handler):
    """Register *handler* in HOOK_HANDLERS.

    The registry key is the part of the handler's name before the first
    underscore (e.g. ``github_hook_helper`` registers as ``github``).
    """
    service_name = handler.__name__.split('_')[0]
    HOOK_HANDLERS[service_name] = handler
    return handler
def perform_update(obj):
    """Trigger repository update of *obj*, in the background when configured."""
    if not appsettings.BACKGROUND_HOOKS:
        obj.do_update()
        return
    updater = threading.Thread(target=obj.do_update)
    updater.start()
@csrf_exempt
def commit_subproject(request, project, subproject):
    """API hook committing pending translations of one subproject."""
    if appsettings.ENABLE_HOOKS:
        obj = get_subproject(request, project, subproject, True)
        if obj.project.enable_hooks:
            obj.commit_pending(request)
            return hook_response('Commit performed')
    return HttpResponseNotAllowed([])
@csrf_exempt
def commit_project(request, project):
    """API hook committing pending translations of a whole project."""
    if appsettings.ENABLE_HOOKS:
        obj = get_project(request, project, True)
        if obj.enable_hooks:
            obj.commit_pending(request)
            return hook_response('Commit performed')
    return HttpResponseNotAllowed([])
@csrf_exempt
def update_subproject(request, project, subproject):
    """API hook updating the VCS repository of one subproject."""
    if appsettings.ENABLE_HOOKS:
        obj = get_subproject(request, project, subproject, True)
        if obj.project.enable_hooks:
            perform_update(obj)
            return hook_response()
    return HttpResponseNotAllowed([])
@csrf_exempt
def update_project(request, project):
    """API hook updating the VCS repositories of a whole project."""
    if appsettings.ENABLE_HOOKS:
        obj = get_project(request, project, True)
        if obj.enable_hooks:
            perform_update(obj)
            return hook_response()
    return HttpResponseNotAllowed([])
@require_POST
@csrf_exempt
def vcs_service_hook(request, service):
    '''
    Shared code between VCS service hooks.
    Currently used for bitbucket_hook, github_hook and gitlab_hook, but should
    be usable for other VCS services (Google Code, custom coded sites, etc.)
    too.
    '''
    # We support only post methods
    if not appsettings.ENABLE_HOOKS:
        return HttpResponseNotAllowed(())
    # Check if we got payload
    try:
        # GitLab sends json as application/json
        if request.META['CONTENT_TYPE'] == 'application/json':
            data = json.loads(request.body.decode('utf-8'))
        # Bitbucket and GitHub sends json as x-www-form-data
        else:
            data = json.loads(request.POST['payload'])
    except (ValueError, KeyError, UnicodeError):
        return HttpResponseBadRequest('Could not parse JSON payload!')
    # Get service helper
    # NOTE(review): an unknown service raises KeyError here (HTTP 500);
    # presumably the URLconf restricts `service` to registered names — verify.
    hook_helper = HOOK_HANDLERS[service]
    # Send the request data to the service handler.
    try:
        service_data = hook_helper(data)
    except KeyError:
        LOGGER.error('failed to parse service %s data', service)
        return HttpResponseBadRequest('Invalid data in json payload!')
    # Log data
    service_long_name = service_data['service_long_name']
    repos = service_data['repos']
    repo_url = service_data['repo_url']
    branch = service_data['branch']
    LOGGER.info(
        'received %s notification on repository %s, branch %s',
        service_long_name, repo_url, branch
    )
    # Match subprojects by any of the candidate repository URL spellings.
    subprojects = SubProject.objects.filter(repo__in=repos)
    if branch is not None:
        subprojects = subprojects.filter(branch=branch)
    # Trigger updates
    updates = 0
    for obj in subprojects:
        if not obj.project.enable_hooks:
            continue
        updates += 1
        LOGGER.info(
            '%s notification will update %s',
            service_long_name,
            obj
        )
        perform_update(obj)
    if updates == 0:
        return hook_response('No matching repositories found!', 'failure')
    return hook_response()
def bitbucket_webhook_helper(data):
    """API to handle webhooks from Bitbucket"""
    params = {'full_name': data['repository']['full_name']}
    candidates = [template % params for template in BITBUCKET_REPOS]
    return {
        'service_long_name': 'Bitbucket',
        'repo_url': data['repository']['links']['html']['href'],
        'repos': candidates,
        'branch': data['push']['changes'][-1]['new']['name'],
    }
@register_hook
def bitbucket_hook_helper(data):
    '''
    API to handle service hooks from Bitbucket.
    '''
    # New-style webhook payloads carry a 'push' section.
    if 'push' in data:
        return bitbucket_webhook_helper(data)
    # Parse owner, branch and repository name
    repository = data['repository']
    params = {'owner': repository['owner'], 'slug': repository['slug']}
    commits = data['commits']
    branch = commits[-1]['branch'] if commits else None
    # Construct possible repository URLs
    scm = repository['scm']
    if scm == 'git':
        templates = BITBUCKET_GIT_REPOS
    elif scm == 'hg':
        templates = BITBUCKET_HG_REPOS
    else:
        LOGGER.error(
            'unsupported repository: %s',
            repr(repository)
        )
        raise ValueError('unsupported repository')
    return {
        'service_long_name': 'Bitbucket',
        'repo_url': ''.join([
            data['canon_url'], repository['absolute_url']
        ]),
        'repos': [template % params for template in templates],
        'branch': branch,
    }
@register_hook
def github_hook_helper(data):
    '''
    API to handle commit hooks from GitHub.
    '''
    # Parse owner, branch and repository name
    repository = data['repository']
    params = {
        'owner': repository['owner']['name'],
        'slug': repository['name'],
    }
    branch = re.sub(r'^refs/heads/', '', data['ref'])
    # Construct possible repository URLs
    return {
        'service_long_name': 'GitHub',
        'repo_url': repository['url'],
        'repos': [template % params for template in GITHUB_REPOS],
        'branch': branch,
    }
@register_hook
def gitlab_hook_helper(data):
    '''
    API to handle commit hooks from GitLab.
    '''
    repository = data['repository']
    ssh_url = repository['url']
    http_url = '.'.join((repository['homepage'], 'git'))
    branch = re.sub(r'^refs/heads/', '', data['ref'])
    # Construct possible repository URLs
    candidates = [
        ssh_url,
        http_url,
        repository['git_http_url'],
        repository['git_ssh_url'],
    ]
    return {
        'service_long_name': 'GitLab',
        'repo_url': repository['homepage'],
        'repos': candidates,
        'branch': branch,
    }
def export_stats(request, project, subproject):
    '''
    Exports stats in JSON format.
    '''
    subprj = get_subproject(request, project, subproject, True)
    # Optional JSONP callback name taken verbatim from the query string.
    # NOTE(review): the callback is reflected unvalidated into a JS response;
    # consider restricting it to identifier characters to avoid script injection.
    jsonp = None
    if 'jsonp' in request.GET and request.GET['jsonp']:
        jsonp = request.GET['jsonp']
    # One stats dict per translation of the subproject.
    response = []
    for trans in subprj.translation_set.all():
        response.append({
            'code': trans.language.code,
            'name': trans.language.name,
            'total': trans.total,
            'total_words': trans.total_words,
            'last_change': trans.last_change,
            'last_author': trans.get_last_author(),
            'translated': trans.translated,
            'translated_words': trans.translated_words,
            'translated_percent': trans.get_translated_percent(),
            'fuzzy': trans.fuzzy,
            'fuzzy_percent': trans.get_fuzzy_percent(),
            'failing': trans.failing_checks,
            'failing_percent': trans.get_failing_checks_percent(),
            'url': trans.get_share_url(),
            'url_translate': get_site_url(trans.get_absolute_url()),
        })
    if jsonp:
        # Wrap the JSON list in the requested callback for JSONP consumers.
        return HttpResponse(
            '{0}({1})'.format(
                jsonp,
                json.dumps(
                    response,
                    cls=DjangoJSONEncoder,
                )
            ),
            content_type='application/javascript'
        )
    return JsonResponse(
        data=response,
        safe=False
    )
| gpl-3.0 |
rvmoura96/projeto-almoxarifado | myvenv/Lib/site-packages/django/utils/dateparse.py | 44 | 4235 | """Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils import six
from django.utils.timezone import get_fixed_timezone, utc
# ISO "YYYY-MM-DD" (month/day may be one digit); anchored at the end.
date_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
# "HH:MM[:SS[.ffffff]]"; fractional digits beyond six are matched but dropped.
time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
# Date and time separated by "T" or a space, with an optional trailing
# "Z" or UTC offset ("+HH", "+HHMM" or "+HH:MM").
datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
# Django's preferred duration format: "[DD] [HH:]MM:SS[.ffffff]".
standard_duration_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) (days?, )?)?'
    r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
    r'(?:(?P<minutes>-?\d+):)?'
    r'(?P<seconds>-?\d+)'
    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
    r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by
# timedelta (days plus a time part; no years/months/weeks).
iso8601_duration_re = re.compile(
    r'^(?P<sign>[-+]?)'
    r'P'
    r'(?:(?P<days>\d+(.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
    r')?'
    r'$'
)
def parse_date(value):
    """Parse an ISO "YYYY-MM-DD" string into a datetime.date.

    Raises ValueError if the input is well formatted but not a valid date.
    Returns None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if not match:
        return None
    parts = {name: int(text) for name, text in match.groupdict().items()}
    return datetime.date(**parts)
def parse_time(value):
    """Parse an "HH:MM[:SS[.ffffff]]" string into a datetime.time.

    This function doesn't support time zone offsets.
    Raises ValueError if the input is well formatted but not a valid time.
    Returns None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    match = time_re.match(value)
    if not match:
        return None
    groups = match.groupdict()
    if groups['microsecond']:
        # Right-pad so '.5' means 500000 microseconds, not 5.
        groups['microsecond'] = groups['microsecond'].ljust(6, '0')
    parts = {name: int(text) for name, text in groups.items() if text is not None}
    return datetime.time(**parts)
def parse_datetime(value):
    """Parses a string and return a datetime.datetime.
    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        if kw['microsecond']:
            # Right-pad the fraction so '.5' means 500000 microseconds.
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            # Offset is "+HH", "+HHMM" or "+HH:MM"; a minutes part exists
            # only when the token is longer than sign + two digits.
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)
def parse_duration(value):
    """Parses a duration string and returns a datetime.timedelta.
    The preferred format for durations in Django is '%d %H:%M:%S.%f'.
    Also supports ISO 8601 representation.
    Returns None when neither format matches.
    """
    match = standard_duration_re.match(value)
    if not match:
        match = iso8601_duration_re.match(value)
    if match:
        kw = match.groupdict()
        # 'sign' only exists for ISO 8601 input; standard input defaults to +.
        sign = -1 if kw.pop('sign', '+') == '-' else 1
        if kw.get('microseconds'):
            # Right-pad so '.5' means 500000 microseconds.
            kw['microseconds'] = kw['microseconds'].ljust(6, '0')
        # Keep microseconds on the same side of zero as negative seconds,
        # e.g. "-1.5" must become -1s -500000us, not -1s +500000us.
        if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
            kw['microseconds'] = '-' + kw['microseconds']
        kw = {k: float(v) for k, v in six.iteritems(kw) if v is not None}
        return sign * datetime.timedelta(**kw)
| mit |
googleapis/proto-breaking-change-detector | src/detector/loader.py | 1 | 4920 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shutil
import os
import subprocess
from subprocess import CalledProcessError, PIPE
from typing import Optional, Sequence
import tempfile
from google.protobuf import descriptor_pb2 as desc
from grpc_tools import protoc
class Loader:
    # This loader is a wrapper of the protoc command.
    # It takes in protoc command arguments (e.g. proto files,
    # descriptor_set_out and proto directories), executes the command
    # and parses the generated descriptor_set file.
    # This also works as the **temporary** solution of loading
    # FileDescriptorSet from API definition files that users pass in
    # from the command line.
    _CURRENT_DIR = os.getcwd()
    # Default include path for the well-known protobuf types.
    PROTOBUF_PROTOS_DIR = os.path.join(_CURRENT_DIR, "protobuf/src")
    # Sentinel protoc "binary" meaning: use the bundled grpcio-tools compiler.
    GRPC_TOOLS_PROTOC = "grpc_tools.protoc"
    def __init__(
        self,
        proto_definition_dirs: Sequence[str],
        proto_files: Sequence[str],
        descriptor_set: str,
        include_source_code: bool = True,
        protoc_binary: Optional[str] = None,
        local_protobuf: bool = True,
    ):
        # descriptor_set: path of a pre-built FileDescriptorSet file; when
        # set, get_descriptor_set() reads it directly and never runs protoc.
        self.proto_definition_dirs = proto_definition_dirs
        self.descriptor_set = descriptor_set
        self.proto_files = proto_files
        self.include_source_code = include_source_code
        self.protoc_binary = protoc_binary or self.GRPC_TOOLS_PROTOC
        self.local_protobuf = local_protobuf
    def get_descriptor_set(self) -> desc.FileDescriptorSet:
        """Load or build and return the FileDescriptorSet for the protos."""
        local_dir = os.getcwd()
        desc_set = desc.FileDescriptorSet()
        # If users pass in descriptor set file directly, we
        # can skip running the protoc command.
        if self.descriptor_set:
            with open(self.descriptor_set, "rb") as f:
                desc_set.ParseFromString(f.read())
            return desc_set
        # Construct the protoc command with proper argument prefix.
        protoc_command = [self.protoc_binary]
        for directory in self.proto_definition_dirs:
            if self.local_protobuf:
                protoc_command.append(f"--proto_path={directory}")
            else:
                protoc_command.append(f"--proto_path={local_dir}/{directory}")
        if self.local_protobuf:
            protoc_command.append(f"--proto_path={self.PROTOBUF_PROTOS_DIR}")
        if self.include_source_code:
            protoc_command.append("--include_source_info")
        # Include the imported dependencies.
        protoc_command.append("--include_imports")
        if self.local_protobuf:
            protoc_command.extend(pf for pf in self.proto_files)
        else:
            protoc_command.extend((local_dir + "/" + pf) for pf in self.proto_files)
        # Run protoc command to get pb file that contains serialized data of
        # the proto files.
        if self.protoc_binary == self.GRPC_TOOLS_PROTOC:
            # NOTE(review): the mkstemp file at ``path`` is never removed;
            # opening the raw ``fd`` below closes it when the context exits.
            fd, path = tempfile.mkstemp()
            protoc_command.append("--descriptor_set_out=" + path)
            # Use grpcio-tools.protoc to compile proto files
            if protoc.main(protoc_command) != 0:
                raise _ProtocInvokerException(
                    f"Protoc command to load the descriptor set fails. {protoc_command}"
                )
            else:
                # Create FileDescriptorSet from the serialized data.
                with open(fd, "rb") as f:
                    desc_set.ParseFromString(f.read())
                return desc_set
        # Stand-alone protoc binary: stream the descriptor set via stdout.
        try:
            protoc_command.append("-o/dev/stdout")
            union_command = " ".join(protoc_command)
            logging.info(f"Run protoc command: {union_command}")
            process = subprocess.run(
                union_command, shell=True, stdout=PIPE, stderr=PIPE
            )
            logging.info(f"Check the process output is not empty:")
            logging.info(bool(process.stdout))
            if process.returncode != 0:
                raise _ProtocInvokerException(
                    f"Protoc command to load the descriptor set fails. {union_command}, error: {process.stderr}"
                )
        except (CalledProcessError, FileNotFoundError) as e:
            # NOTE(review): if subprocess.run itself raises (e.g. protoc not
            # found), ``process`` is unbound and ParseFromString below raises
            # NameError — confirm whether this branch should re-raise instead.
            logging.info(f"Call process error: {e}")
        # Create FileDescriptorSet from the serialized data.
        desc_set.ParseFromString(process.stdout)
        return desc_set
class _ProtocInvokerException(Exception):
    """Raised when the protoc invocation fails (non-zero exit status)."""
    pass
| apache-2.0 |
robingall2910/RobTheBoat | utils/tools.py | 1 | 2926 | import re
import requests
import discord
import io
from datetime import datetime
# Matches a raw Discord user mention, e.g. "<@123456>"; group 1 is the id.
_USER_ID_MATCH = re.compile(r"<@(\d+)>")
# Matches a custom Discord emote, e.g. "<:name:123456>"; group 2 is the id.
_EMOTE_ID_MATCH = re.compile(r"<:(.+?):(\d+)>")
# Code-block templates for Discord syntax highlighting.
py = "```py\n{}```"
xl = "```xl\n{}```"
diff = "```diff\n{}```"
def write_file(filename, contents):
    """Write each item of *contents* to *filename*, one per line (UTF-8)."""
    with open(filename, "w", encoding="utf8") as out:
        for item in contents:
            out.write(f"{item}\n")
def download_file(url, destination):
    """Download *url* and write it to the *destination* path in 100 kB chunks."""
    # NOTE(review): no stream=True, so requests may buffer the whole body in
    # memory before iter_content yields — confirm intent for large files.
    req = requests.get(url)
    file = open(destination, "wb")
    for chunk in req.iter_content(100000):
        file.write(chunk)
    file.close()
def extract_emote_id(arg):
    """Return the numeric id of a custom emote mention, or None on no match."""
    match = _EMOTE_ID_MATCH.match(arg)
    if match is None:
        return None
    return str(match.group(2))
def get_avatar(user, animate=True):
    """Return the user's avatar URL, falling back to the default avatar.

    When *animate* is False, any animated (.gif) URL is rewritten to .png.
    """
    avatar = user.avatar_url or user.default_avatar_url
    return avatar if animate else avatar.replace(".gif", ".png")
def make_message_embed(author, color, message, *, formatUser=False, useNick=False):
    """Build a discord.Embed showing *message* with the author's name/avatar.

    formatUser: use ``str(author)`` as the displayed name.
    useNick: use the author's display_name (per-guild nickname) instead.
    Otherwise the plain account name is used.
    """
    if formatUser:
        name = str(author)
    elif useNick:
        name = author.display_name
    else:
        name = author.name
    embed = discord.Embed(color=color, description=message)
    embed.set_author(name=name, icon_url=get_avatar(author))
    return embed
def remove_html(arg):
arg = arg.replace(""", "\"").replace("<br />", "").replace("[i]", "*").replace("[/i]", "*")
arg = arg.replace("“", "\"").replace("”", "\"").replace("'", "'").replace("—", "—")
arg = arg.replace("–", "–")
return arg
def make_list_embed(fields):
    """Build a discord.Embed with one inline field per (name, value) item of *fields*."""
    # \u200b (zero-width space) keeps the description visually empty but set.
    embed = discord.Embed(description="\u200b")
    for key, value in fields.items():
        embed.add_field(name=key, value=value, inline=True)
    return embed
def format_time(time):
    """Format a datetime like "January 02, 2020 at 03:04:05 PM"."""
    return "{:%B %d, %Y at %I:%M:%S %p}".format(time)
def convert_to_bool(arg):
    """Interpret *arg* loosely as a boolean.

    Accepts common yes/no spellings (case-insensitive, any type convertible
    via str()); raises ValueError for anything unrecognised.
    """
    token = str(arg).lower()
    truthy = {"yes", "y", "true", "t", "1", "enable", "on"}
    falsy = {"no", "n", "false", "f", "0", "disable", "off"}
    if token in truthy:
        return True
    if token in falsy:
        return False
    raise ValueError
def strip_global_mentions(message, ctx=None):
    """Neutralise @everyone/@here pings by dropping the '@'.

    If *ctx* is given and its author already has the mention_everyone
    permission in the channel, the message is returned untouched.
    Matching is case-insensitive.
    """
    if ctx:
        perms = ctx.message.channel.permissions_for(ctx.message.author)
        if perms.mention_everyone:
            return message
    message = re.sub(re.escape("@everyone"), "everyone", message, flags=re.IGNORECASE)
    message = re.sub(re.escape("@here"), "here", message, flags=re.IGNORECASE)
    return message
def format_number(number):
    """Render an integer with thousands separators, e.g. 1234 -> "1,234"."""
    return "{:,d}".format(number)
def url_to_bytes(url):
    """Download *url* into memory.

    Returns a dict with 'content' (an io.BytesIO of the body) and
    'filename' (the last path segment of the URL).
    """
    data = requests.get(url)
    content = io.BytesIO(data.content)
    filename = url.rsplit("/", 1)[-1]
    return {"content":content, "filename":filename}
ryszard/whoosh | src/whoosh/support/bitvector.py | 2 | 3585 | import operator
from array import array
# Table of the number of '1' bits in each byte (0-255)
BYTE_COUNTS = array('B',[
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8])
class BitVector(object):
    """A fixed-size bit vector backed by an ``array`` of bytes.

    Python 2 era code (``xrange``); kept as-is.
    """
    def __init__(self, size, bits = None, source = None):
        """Create a vector of *size* bits.

        :param bits: optional pre-built byte array to wrap (shared, not copied).
        :param source: optional iterable of bit indices to set initially.
        """
        self.size = size
        if bits:
            self.bits = bits
        else:
            # One byte per 8 bits, plus one spare byte.
            self.bits = array("B", ([0x00] * ((size >> 3) + 1)))
        if source:
            for num in source:
                self.set(num)
        # Cached population count; invalidated (None) by set()/clear().
        self.bcount = None
    def __len__(self):
        return self.size
    def __contains__(self, index):
        return self[index]
    def __iter__(self):
        # Yield the indices of all set bits, in ascending order.
        get = self.__getitem__
        for i in xrange(0, self.size):
            if get(i):
                yield i
    def __repr__(self):
        return "<BitVector %s>" % self.__str__()
    def __str__(self):
        # Render as a "0"/"1" string, lowest index first.
        get = self.__getitem__
        return "".join("1" if get(i) else "0"
                       for i in xrange(0, self.size))
    def __getitem__(self, index):
        # Byte index is index >> 3; bit mask within the byte is 1 << (index & 7).
        return self.bits[index >> 3] & (1 << (index & 7)) != 0
    def __setitem__(self, index, value):
        if value:
            self.set(index)
        else:
            self.clear(index)
    def _logic(self, op, bitv):
        # Combine two equal-sized vectors bytewise with *op* (and/or/xor).
        if self.size != bitv.size:
            raise ValueError("Can't combine bitvectors of different sizes")
        res = BitVector(size = self.size )
        lpb = map(op, self.bits, bitv.bits)
        res.bits = array('B', lpb )
        return res
    def __and__(self, bitv):
        return self._logic(operator.__and__, bitv)
    def __or__(self, bitv):
        return self._logic(operator.__or__, bitv)
    def __xor__(self, bitv):
        return self._logic(operator.__xor__, bitv)
    def count(self):
        """Return the number of set bits (cached until the next mutation)."""
        if self.bcount is None:
            c = 0
            for b in self.bits:
                c += BYTE_COUNTS[b & 0xFF]
            self.bcount = c
        return self.bcount
    def set(self, index):
        """Set bit *index* to 1."""
        self.bits[index >> 3] |= 1 << (index & 7)
        self.bcount = None
    def clear(self, index):
        """Set bit *index* to 0."""
        self.bits[index >> 3] &= ~(1 << (index & 7))
        self.bcount = None
    def copy(self):
        # NOTE(review): the copy shares the underlying byte array with the
        # original (bits is kept by reference in __init__), so mutating either
        # vector affects both — confirm whether a deep copy was intended.
        return BitVector(self.size, bits = self.bits)
if __name__ == "__main__":
    # Ad-hoc manual smoke test (Python 2 syntax: bare ``print`` statements).
    b = BitVector(10)
    b.set(1)
    b.set(9)
    b.set(5)
    print b
    print b[2]
    print b[5]
    b.clear(5)
    print b[5]
    print b
    # Demonstrate bytewise XOR of two vectors.
    c = BitVector(10)
    c.set(1)
    c.set(5)
    print " ", b
    print "^", c
    print "=", b ^ c
| apache-2.0 |
Captain-Coder/tribler | Tribler/Core/Modules/wallet/wallet.py | 1 | 1837 | from __future__ import absolute_import
import abc
import logging
import random
import string
import six
from Tribler.pyipv8.ipv8.taskmanager import TaskManager
class InsufficientFunds(Exception):
    """
    Raised when there are not sufficient funds available to transfer assets.
    """
    pass
class Wallet(six.with_metaclass(abc.ABCMeta, TaskManager)):
    """
    This is the base class of a wallet and contains various methods that every wallet should implement.
    To create your own wallet, subclass this class and implement the required methods.
    """
    def __init__(self):
        super(Wallet, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        # Lifecycle flags; concrete subclasses are expected to flip these.
        self.created = False
        self.unlocked = False
    def generate_txid(self, length=10):
        """
        Generate a random transaction ID of *length* characters (A-Z, 0-9).
        Note: uses ``random``, so it is not cryptographically secure.
        """
        return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
    @abc.abstractmethod
    def get_identifier(self):
        """Return a short identifier for this wallet type."""
        return
    @abc.abstractmethod
    def get_name(self):
        """Return a human-readable name for this wallet."""
        return
    @abc.abstractmethod
    def create_wallet(self, *args, **kwargs):
        """Create/initialise the underlying wallet."""
        return
    @abc.abstractmethod
    def get_balance(self):
        """Return the current balance of the wallet."""
        return
    @abc.abstractmethod
    def transfer(self, *args, **kwargs):
        """Transfer assets out of this wallet."""
        return
    @abc.abstractmethod
    def get_address(self):
        """Return the receive address of this wallet."""
        return
    @abc.abstractmethod
    def get_transactions(self):
        """Return the transaction history of this wallet."""
        return
    @abc.abstractmethod
    def min_unit(self):
        """Return the smallest transferable unit of the asset."""
        return
    @abc.abstractmethod
    def precision(self):
        """
        The precision of an asset inside a wallet represents the number of digits after the decimal.
        For fiat currency, the precision would be 2 since the minimum unit is often 0.01.
        """
        return
| lgpl-3.0 |
jonathanmarvens/pycco | pycco_resources/__init__.py | 19 | 8075 | css = """\
/*--------------------- Layout and Typography ----------------------------*/
body {
font-family: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
font-size: 16px;
line-height: 24px;
color: #252519;
margin: 0; padding: 0;
background: #f5f5ff;
}
a {
color: #261a3b;
}
a:visited {
color: #261a3b;
}
p {
margin: 0 0 15px 0;
}
h1, h2, h3, h4, h5, h6 {
margin: 40px 0 15px 0;
}
h2, h3, h4, h5, h6 {
margin-top: 0;
}
#container {
background: white;
}
#container, div.section {
position: relative;
}
#background {
position: absolute;
top: 0; left: 580px; right: 0; bottom: 0;
background: #f5f5ff;
border-left: 1px solid #e5e5ee;
z-index: 0;
}
#jump_to, #jump_page {
background: white;
-webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777;
-webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px;
font: 10px Arial;
text-transform: uppercase;
cursor: pointer;
text-align: right;
}
#jump_to, #jump_wrapper {
position: fixed;
right: 0; top: 0;
padding: 5px 10px;
}
#jump_wrapper {
padding: 0;
display: none;
}
#jump_to:hover #jump_wrapper {
display: block;
}
#jump_page {
padding: 5px 0 3px;
margin: 0 0 25px 25px;
}
#jump_page .source {
display: block;
padding: 5px 10px;
text-decoration: none;
border-top: 1px solid #eee;
}
#jump_page .source:hover {
background: #f5f5ff;
}
#jump_page .source:first-child {
}
div.docs {
float: left;
max-width: 500px;
min-width: 500px;
min-height: 5px;
padding: 10px 25px 1px 50px;
vertical-align: top;
text-align: left;
}
.docs pre {
margin: 15px 0 15px;
padding-left: 15px;
}
.docs p tt, .docs p code {
background: #f8f8ff;
border: 1px solid #dedede;
font-size: 12px;
padding: 0 0.2em;
}
.octowrap {
position: relative;
}
.octothorpe {
font: 12px Arial;
text-decoration: none;
color: #454545;
position: absolute;
top: 3px; left: -20px;
padding: 1px 2px;
opacity: 0;
-webkit-transition: opacity 0.2s linear;
}
div.docs:hover .octothorpe {
opacity: 1;
}
div.code {
margin-left: 580px;
padding: 14px 15px 16px 50px;
vertical-align: top;
}
.code pre, .docs p code {
font-size: 12px;
}
pre, tt, code {
line-height: 18px;
font-family: Monaco, Consolas, "Lucida Console", monospace;
margin: 0; padding: 0;
}
div.clearall {
clear: both;
}
/*---------------------- Syntax Highlighting -----------------------------*/
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
body .hll { background-color: #ffffcc }
body .c { color: #408080; font-style: italic } /* Comment */
body .err { border: 1px solid #FF0000 } /* Error */
body .k { color: #954121 } /* Keyword */
body .o { color: #666666 } /* Operator */
body .cm { color: #408080; font-style: italic } /* Comment.Multiline */
body .cp { color: #BC7A00 } /* Comment.Preproc */
body .c1 { color: #408080; font-style: italic } /* Comment.Single */
body .cs { color: #408080; font-style: italic } /* Comment.Special */
body .gd { color: #A00000 } /* Generic.Deleted */
body .ge { font-style: italic } /* Generic.Emph */
body .gr { color: #FF0000 } /* Generic.Error */
body .gh { color: #000080; font-weight: bold } /* Generic.Heading */
body .gi { color: #00A000 } /* Generic.Inserted */
body .go { color: #808080 } /* Generic.Output */
body .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
body .gs { font-weight: bold } /* Generic.Strong */
body .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
body .gt { color: #0040D0 } /* Generic.Traceback */
body .kc { color: #954121 } /* Keyword.Constant */
body .kd { color: #954121; font-weight: bold } /* Keyword.Declaration */
body .kn { color: #954121; font-weight: bold } /* Keyword.Namespace */
body .kp { color: #954121 } /* Keyword.Pseudo */
body .kr { color: #954121; font-weight: bold } /* Keyword.Reserved */
body .kt { color: #B00040 } /* Keyword.Type */
body .m { color: #666666 } /* Literal.Number */
body .s { color: #219161 } /* Literal.String */
body .na { color: #7D9029 } /* Name.Attribute */
body .nb { color: #954121 } /* Name.Builtin */
body .nc { color: #0000FF; font-weight: bold } /* Name.Class */
body .no { color: #880000 } /* Name.Constant */
body .nd { color: #AA22FF } /* Name.Decorator */
body .ni { color: #999999; font-weight: bold } /* Name.Entity */
body .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
body .nf { color: #0000FF } /* Name.Function */
body .nl { color: #A0A000 } /* Name.Label */
body .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
body .nt { color: #954121; font-weight: bold } /* Name.Tag */
body .nv { color: #19469D } /* Name.Variable */
body .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
body .w { color: #bbbbbb } /* Text.Whitespace */
body .mf { color: #666666 } /* Literal.Number.Float */
body .mh { color: #666666 } /* Literal.Number.Hex */
body .mi { color: #666666 } /* Literal.Number.Integer */
body .mo { color: #666666 } /* Literal.Number.Oct */
body .sb { color: #219161 } /* Literal.String.Backtick */
body .sc { color: #219161 } /* Literal.String.Char */
body .sd { color: #219161; font-style: italic } /* Literal.String.Doc */
body .s2 { color: #219161 } /* Literal.String.Double */
body .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
body .sh { color: #219161 } /* Literal.String.Heredoc */
body .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
body .sx { color: #954121 } /* Literal.String.Other */
body .sr { color: #BB6688 } /* Literal.String.Regex */
body .s1 { color: #219161 } /* Literal.String.Single */
body .ss { color: #19469D } /* Literal.String.Symbol */
body .bp { color: #954121 } /* Name.Builtin.Pseudo */
body .vc { color: #19469D } /* Name.Variable.Class */
body .vg { color: #19469D } /* Name.Variable.Global */
body .vi { color: #19469D } /* Name.Variable.Instance */
body .il { color: #666666 } /* Literal.Number.Integer.Long */
"""
html = """\
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>{{ title }}</title>
<link rel="stylesheet" href="{{ stylesheet }}">
</head>
<body>
<div id='container'>
<div id="background"></div>
{{#sources?}}
<div id="jump_to">
Jump To …
<div id="jump_wrapper">
<div id="jump_page">
{{#sources}}
<a class="source" href="{{ url }}">{{ basename }}</a>
{{/sources}}
</div>
</div>
</div>
{{/sources?}}
<div class='section'>
<div class='docs'><h1>{{ title }}</h1></div>
</div>
<div class='clearall'>
{{#sections}}
<div class='section' id='section-{{ num }}'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-{{ num }}'>#</a>
</div>
{{{ docs_html }}}
</div>
<div class='code'>
{{{ code_html }}}
</div>
</div>
<div class='clearall'></div>
{{/sections}}
</div>
</body>
"""
| mit |
rjdp/Easynginedemoplugin | ee/core/aptget.py | 1 | 4930 | """EasyEngine package installation using apt-get module."""
import apt
import apt_pkg
import sys
import subprocess
from ee.core.logging import Log
from sh import apt_get
from sh import ErrorReturnCode
class EEAptGet():
    """Manage Debian packages for EasyEngine via apt-get and python-apt.

    Subprocess-based commands append their output to /var/log/ee/ee.log;
    methods return True on success and log (without raising) on failure.
    """
    def update(self):
        """Run ``apt-get update``; return True on success."""
        try:
            with open('/var/log/ee/ee.log', 'a') as f:
                proc = subprocess.Popen('apt-get update',
                                        shell=True,
                                        stdin=None, stdout=f, stderr=f,
                                        executable="/bin/bash")
                proc.wait()
                if proc.returncode == 0:
                    return True
                else:
                    Log.error(self, "Unable to run apt-get update")
        except Exception as e:
            Log.error(self, "Error while installing packages, "
                      "apt-get exited with error")
    def dist_upgrade(self):
        """Upgrade all packages via python-apt (similar to `apt-get dist-upgrade`).

        Returns True when the cache commit succeeds.
        """
        # BUGFIX: ``self`` was missing from the signature, so calling this as
        # an instance method raised TypeError (and the Log.* calls below
        # referenced an undefined ``self``).
        try:
            apt_cache = apt.cache.Cache()
            apt_cache.update()
            apt_cache.open(None)
            apt_cache.upgrade(True)
            success = (apt_cache.commit(
                apt.progress.text.AcquireProgress(),
                apt.progress.base.InstallProgress()))
            # apt_cache.close()
            return success
        except AttributeError as e:
            Log.error(self, 'AttributeError: ' + str(e))
        except apt.cache.FetchFailedException as e:
            # BUGFIX: was the unqualified (undefined) name FetchFailedException,
            # which raised NameError whenever a fetch failure occurred.
            Log.debug(self, 'SystemError: ' + str(e))
            Log.error(self, 'Unable to Fetch update')
    def install(self, packages):
        """Install *packages* (iterable of names), keeping old config files."""
        all_packages = ' '.join(packages)
        try:
            with open('/var/log/ee/ee.log', 'a') as f:
                proc = subprocess.Popen("apt-get install -o Dpkg::Options::=--"
                                        "force-confold -y {0}"
                                        .format(all_packages), shell=True,
                                        stdin=None, stdout=f, stderr=f,
                                        executable="/bin/bash")
                proc.wait()
                if proc.returncode == 0:
                    return True
                else:
                    Log.error(self, "Unable to run apt-get install")
        except Exception as e:
            Log.error(self, "Error while installing packages, "
                      "apt-get exited with error")
    def remove(self, packages, auto=False, purge=False):
        """Remove *packages*; with purge=True also delete their config files.

        NOTE(review): the ``auto`` parameter is currently unused; kept for
        interface compatibility.
        """
        all_packages = ' '.join(packages)
        try:
            with open('/var/log/ee/ee.log', 'a') as f:
                if purge:
                    proc = subprocess.Popen('apt-get purge -y {0}'
                                            .format(all_packages), shell=True,
                                            stdin=None, stdout=f, stderr=f,
                                            executable="/bin/bash")
                else:
                    proc = subprocess.Popen('apt-get remove -y {0}'
                                            .format(all_packages), shell=True,
                                            stdin=None, stdout=f, stderr=f,
                                            executable="/bin/bash")
                proc.wait()
                if proc.returncode == 0:
                    return True
                else:
                    Log.error(self, "Unable to run apt-get remove/purge")
        except Exception as e:
            Log.error(self, "Error while installing packages, "
                      "apt-get exited with error")
    def auto_clean(self):
        """
        Similar to `apt-get autoclean`, redirecting output to the log file.
        """
        try:
            orig_out = sys.stdout
            # NOTE(review): this class defines no ``app`` attribute; this
            # lookup presumably relies on a host/mixin object — verify.
            sys.stdout = open(self.app.config.get('log.logging', 'file'),
                              encoding='utf-8', mode='a')
            apt_get.autoclean("-y")
            sys.stdout = orig_out
        except ErrorReturnCode as e:
            Log.debug(self, "{0}".format(e))
            Log.error(self, "Unable to apt-get autoclean")
    def auto_remove(self):
        """
        Similar to `apt-get autoremove`.
        """
        try:
            Log.debug(self, "Running apt-get autoremove")
            apt_get.autoremove("-y")
        except ErrorReturnCode as e:
            Log.debug(self, "{0}".format(e))
            Log.error(self, "Unable to apt-get autoremove")
    def is_installed(self, package_name):
        """
        Checks if package is available in cache and is installed or not
        returns True if installed otherwise returns False
        """
        apt_cache = apt.cache.Cache()
        apt_cache.open()
        if (package_name.strip() in apt_cache and
           apt_cache[package_name.strip()].is_installed):
            # apt_cache.close()
            return True
        # apt_cache.close()
        return False
| mit |
Yubico/pkcs11test | googletest-release-1.8.0/googletest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUGFIX: this line previously read ``IS_WINDOWS = os.name = 'nt'`` — a
# chained *assignment* that clobbered os.name with 'nt' on every platform
# and made IS_WINDOWS the always-truthy string 'nt', silently skipping all
# the ``if not IS_WINDOWS`` branches below.  A comparison was intended.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ under the given TERM / GTEST_COLOR / --gtest_color
  combination and returns whether the binary decided to use colors
  (truthy exit code, or abnormal termination)."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  if color_flag is None:
    args = []
  else:
    args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  # Non-exited (crashed/killed) processes are treated as "uses color".
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Checks color selection for every TERM / env-var / flag combination."""
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Dumb/mono terminals never get color by default.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' falls back to TERM detection.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color.

    The command-line flag takes precedence over the environment variable."""
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  gtest_test_utils.Main()  # delegates to the shared gtest Python test runner
| apache-2.0 |
romain-li/edx-platform | common/lib/xmodule/xmodule/services.py | 163 | 2772 | """
Module contains various XModule/XBlock services
"""
from django.conf import settings
class SettingsService(object):
    """
    Provides server-wide, per-type configuration for XBlocks.

    Configuration is read from the ``XBLOCK_SETTINGS`` Django setting, which
    maps bucket keys to arbitrary per-XBlock-type values ("buckets").  The
    bucket key for a given block is resolved as follows:

    * If the block has an attribute/property named by
      ``SettingsService.xblock_settings_bucket_selector`` (by default
      ``block_settings_key``), its value is the bucket key.
    * Otherwise the XBlock class name is used.

    The service is content-agnostic: whatever happens to be stored in the
    bucket is returned as-is.  The ``default`` argument (an empty dict when
    omitted or None) is returned when XBLOCK_SETTINGS is absent, empty, or
    has no entry for the computed bucket key.

    Example::

        XBLOCK_SETTINGS = {
            "my_block": {"setting1": 1, "setting2": []},
            "my_other_block": [1, 2, 3],
            "MyThirdBlock": "QWERTY",
        }

        class MyBlock:      block_settings_key = 'my_block'
        class MyThirdBlock: pass
        class MissingBlock: pass

        service = SettingsService()
        service.get_settings_bucket(MyBlock())               # {"setting1": 1, "setting2": []}
        service.get_settings_bucket(MyThirdBlock())          # "QWERTY"
        service.get_settings_bucket(MissingBlock())          # {}
        service.get_settings_bucket(MissingBlock(), "deflt") # "deflt"
    """
    xblock_settings_bucket_selector = 'block_settings_key'

    def get_settings_bucket(self, block, default=None):
        """Return the settings bucket for ``block`` (or ``default`` when absent)."""
        if not block:
            raise ValueError("Expected XBlock instance, got {0} of type {1}".format(block, type(block)))
        fallback = {} if default is None else default
        bucket_key = getattr(block, self.xblock_settings_bucket_selector,
                             block.unmixed_class.__name__)
        all_settings = getattr(settings, "XBLOCK_SETTINGS", {})
        return all_settings.get(bucket_key, fallback)
| agpl-3.0 |
waseem18/oh-mainline | vendor/packages/twisted/twisted/python/test/test_fakepwd.py | 18 | 7525 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.fakepwd}.
"""
try:
import pwd
except ImportError:
pwd = None
from operator import getitem
from twisted.trial.unittest import TestCase
from twisted.python.fakepwd import UserDatabase
from twisted.python.compat import set
class UserDatabaseTestsMixin:
    """
    L{UserDatabaseTestsMixin} defines tests which apply to any user database
    implementation.  Subclasses should mix it in, implement C{setUp} to create
    C{self.database} bound to a user database instance, and implement
    C{getExistingUserInfo} to return information about a user (such information
    should be unique per test method).
    """
    # Attribute names of a passwd-style entry, in positional order.
    _fieldNames = ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid',
                   'pw_gecos', 'pw_dir', 'pw_shell')

    def _checkAttributes(self, entry, info):
        """
        Assert that each C{pw_*} attribute of C{entry} equals the
        corresponding element of the C{info} tuple.
        """
        for name, expected in zip(self._fieldNames, info):
            self.assertEquals(getattr(entry, name), expected)

    def test_getpwuid(self):
        """
        I{getpwuid} accepts a uid and returns the user record associated with
        it.
        """
        for _ in range(2):
            # Get some user which exists in the database and look it up.
            info = self.getExistingUserInfo()
            self._checkAttributes(self.database.getpwuid(info[2]), info)

    def test_noSuchUID(self):
        """
        I{getpwuid} raises L{KeyError} when passed a uid which does not exist
        in the user database.
        """
        self.assertRaises(KeyError, self.database.getpwuid, -13)

    def test_getpwnam(self):
        """
        I{getpwnam} accepts a username and returns the user record associated
        with it.
        """
        for _ in range(2):
            # Get some user which exists in the database and look it up.
            info = self.getExistingUserInfo()
            self._checkAttributes(self.database.getpwnam(info[0]), info)

    def test_noSuchName(self):
        """
        I{getpwnam} raises L{KeyError} when passed a username which does not
        exist in the user database.
        """
        self.assertRaises(
            KeyError, self.database.getpwnam,
            'nosuchuserexiststhenameistoolongandhas\x01inittoo')

    def test_recordLength(self):
        """
        The user record returned by I{getpwuid}, I{getpwnam}, and I{getpwall}
        has a length.
        """
        db = self.database
        info = self.getExistingUserInfo()
        username, uid = info[0], info[2]
        for entry in [db.getpwuid(uid), db.getpwnam(username), db.getpwall()[0]]:
            self.assertIsInstance(len(entry), int)

    def test_recordIndexable(self):
        """
        The user record returned by I{getpwuid}, I{getpwnam}, and I{getpwall}
        is indexable, with successive indexes starting from 0 corresponding to
        the values of the C{pw_name}, C{pw_passwd}, C{pw_uid}, C{pw_gid},
        C{pw_gecos}, C{pw_dir}, and C{pw_shell} attributes, respectively.
        """
        db = self.database
        info = self.getExistingUserInfo()
        username, uid = info[0], info[2]
        for entry in [db.getpwuid(uid), db.getpwnam(username), db.getpwall()[0]]:
            for index, expected in enumerate(info):
                self.assertEquals(entry[index], expected)
            self.assertEquals(len(entry), len(list(entry)))
            self.assertRaises(IndexError, getitem, entry, 7)
class UserDatabaseTests(TestCase, UserDatabaseTestsMixin):
    """
    Tests for L{UserDatabase}.
    """
    def setUp(self):
        """
        Create a L{UserDatabase} with no user data in it.
        """
        self.database = UserDatabase()
        self._counter = 0

    def getExistingUserInfo(self):
        """
        Add a new, unique user to C{self.database} and return its fields as a
        seven-tuple.
        """
        self._counter += 1
        n = self._counter
        suffix = '_' + str(n)
        record = ('username' + suffix, 'password' + suffix, n, n + 1000,
                  'gecos' + suffix, 'dir' + suffix, 'shell' + suffix)
        self.database.addUser(*record)
        return record

    def test_addUser(self):
        """
        L{UserDatabase.addUser} accepts seven arguments, one for each field of
        a L{pwd.struct_passwd}, and makes the new record available via
        L{UserDatabase.getpwuid}, L{UserDatabase.getpwnam}, and
        L{UserDatabase.getpwall}.
        """
        fields = ('alice', 'secr3t', 123, 456, 'Alice,,,',
                  '/users/alice', '/usr/bin/foosh')
        names = ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid',
                 'pw_gecos', 'pw_dir', 'pw_shell')

        db = self.database
        db.addUser(*fields)

        entries = [db.getpwuid(fields[2]), db.getpwnam(fields[0])]
        # getpwall must report exactly the one record just added.
        [allEntry] = db.getpwall()
        entries.append(allEntry)

        for entry in entries:
            for attribute, expected in zip(names, fields):
                self.assertEquals(getattr(entry, attribute), expected)
class PwdModuleTests(TestCase, UserDatabaseTestsMixin):
    """
    L{PwdModuleTests} runs the tests defined by L{UserDatabaseTestsMixin}
    against the built-in C{pwd} module.  This serves to verify that
    L{UserDatabase} is really a fake of that API.
    """
    if pwd is None:
        skip = "Cannot verify UserDatabase against pwd without pwd"

    def setUp(self):
        # Use the real pwd module as the database under test; draw records
        # lazily from the system account list.
        self.database = pwd
        self._users = iter(self.database.getpwall())
        self._uids = set()

    def getExistingUserInfo(self):
        """
        Read and return the next record from C{self._users}, filtering out
        any records with previously seen uid values (as these cannot be
        found with C{getpwuid} and only cause trouble).
        """
        while True:
            # The next() builtin (rather than the Python-2-only .next()
            # method) keeps this working on Python 2.6+ and Python 3.
            entry = next(self._users)
            if entry.pw_uid not in self._uids:
                self._uids.add(entry.pw_uid)
                return entry
| agpl-3.0 |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/optimize/tests/test_optimize.py | 7 | 35456 | """
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_raises, assert_allclose, assert_equal,
assert_, TestCase, run_module_suite, dec,
assert_almost_equal)
from scipy import optimize
def test_check_grad():
    # check_grad should report a near-zero discrepancy between the analytic
    # and finite-difference derivatives of the logistic function.
    def logit(x):
        return 1 / (1 + np.exp(-x))

    def der_logit(x):
        return np.exp(-x) / (1 + np.exp(-x))**2

    point = np.array([1.5])

    assert_almost_equal(optimize.check_grad(logit, der_logit, point), 0)
    assert_almost_equal(
        optimize.check_grad(logit, der_logit, point, epsilon=1e-6), 0)

    # A deliberately coarse epsilon must degrade the estimate measurably,
    # proving the parameter is actually honored.
    err = abs(optimize.check_grad(logit, der_logit, point, epsilon=1e-1) - 0)
    assert_(err > 1e-7)
class TestOptimize(object):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """
    # NOTE: many tests below assert exact function/gradient call counts and
    # exact iterate traces recorded since SciPy 0.7.0; the evaluation order
    # of func/grad is therefore load-bearing and must not be changed.
    def setUp(self):
        # Problem data: feature matrix F and constraint vector K, plus
        # counters and a trace of all points at which func was evaluated.
        self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
        self.K = np.array([1., 0.3, 0.5])
        self.startparams = np.zeros(3, np.float64)
        self.solution = np.array([0., -0.524869316, 0.487525860])
        self.maxiter = 1000
        self.funccalls = 0
        self.gradcalls = 0
        self.trace = []

    def func(self, x):
        # Objective: log partition function minus the linear constraint term.
        self.funccalls += 1
        if self.funccalls > 6000:
            raise RuntimeError("too many iterations in optimization routine")
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        f = logZ - np.dot(self.K, x)
        self.trace.append(x)
        return f

    def grad(self, x):
        # Analytic gradient of func.
        self.gradcalls += 1
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.transpose(), p) - self.K

    def hess(self, x):
        # Analytic Hessian of func.
        log_pdot = np.dot(self.F, x)
        logZ = np.log(sum(np.exp(log_pdot)))
        p = np.exp(log_pdot - logZ)
        return np.dot(self.F.T,
                      np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))

    def hessp(self, x, p):
        # Hessian-vector product, for the hessp= interfaces.
        return np.dot(self.hess(x), p)

    def test_cg(self, use_wrapper=False):
        """ conjugate gradient optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='CG', jac=self.grad,
                                    options=opts)
            params, fopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
                                      maxiter=self.maxiter,
                                      full_output=True, disp=False, retall=False)
            (params, fopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 9, self.funccalls)
        assert_(self.gradcalls == 7, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[2:4],
                        [[0, -0.5, 0.5],
                         [0, -5.05700028e-01, 4.95985862e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs(self, use_wrapper=False):
        """ Broyden-Fletcher-Goldfarb-Shanno optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)
            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
                res['x'], res['fun'], res['jac'], res['hess_inv'], \
                res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=False, retall=False)
            (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7)

    def test_bfgs_nan(self):
        """Test corner case where nan is fed to optimizer.  See #1542."""
        func = lambda x: x
        fprime = lambda x: np.ones_like(x)
        x0 = [np.nan]
        with np.errstate(over='ignore', invalid='ignore'):
            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(np.isnan(func(x)))

    def test_bfgs_numerical_jacobian(self):
        """ BFGS with numerical jacobian and a vector epsilon parameter """
        # define the epsilon parameter using a random vector
        epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))

        params = optimize.fmin_bfgs(self.func, self.startparams,
                                    epsilon=epsilon, args=(),
                                    maxiter=self.maxiter, disp=False)

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_bfgs_infinite(self, use_wrapper=False):
        """Test corner case where -Inf is the minimum.  See #1494."""
        func = lambda x: -np.e**-x
        fprime = lambda x: -func(x)
        x0 = [0]
        olderr = np.seterr(over='ignore')
        try:
            if use_wrapper:
                opts = {'disp': False}
                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
                                      args=(), options=opts)['x']
            else:
                x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
            assert_(not np.isfinite(func(x)))
        finally:
            np.seterr(**olderr)

    def test_bfgs_gh_2169(self):
        # Objective with a huge jump at the origin; BFGS must still converge
        # to the true minimum at x = 1.
        def f(x):
            if x < 0:
                return 1.79769313e+308
            else:
                return x + 1./x
        xs = optimize.fmin_bfgs(f, [10.], disp=False)
        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)

    def test_powell(self, use_wrapper=False):
        """ Powell (direction set) optimization routine
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = \
                res['x'], res['fun'], res['direc'], res['nit'], \
                res['nfev'], res['status']
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                          args=(), maxiter=self.maxiter,
                                          full_output=True, disp=False, retall=False)
            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        #
        # However, some leeway must be added: the exact evaluation
        # count is sensitive to numerical error, and floating-point
        # computations are not bit-for-bit reproducible across
        # machines, and when using e.g. MKL, data alignment
        # etc. affect the rounding error.
        #
        assert_(self.funccalls <= 116 + 20, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7)

    def test_neldermead(self, use_wrapper=False):
        """ Nelder-Mead simplex algorithm
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Nelder-mead', options=opts)
            params, fopt, numiter, func_calls, warnflag = \
                res['x'], res['fun'], res['nit'], res['nfev'], \
                res['status']
        else:
            retval = optimize.fmin(self.func, self.startparams,
                                   args=(), maxiter=self.maxiter,
                                   full_output=True, disp=False, retall=False)
            (params, fopt, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 167, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[76:78],
                        [[0.1928968, -0.62780447, 0.35166118],
                         [0.19572515, -0.63648426, 0.35838135]],
                        atol=1e-14, rtol=1e-7)

    def test_ncg(self, use_wrapper=False):
        """ line-search Newton conjugate gradient optimization routine
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 22, self.gradcalls)  # 0.13.0
        #assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
        #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hess(self, use_wrapper=False):
        """ Newton conjugate gradient with Hessian """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hess=self.hess,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess=self.hess,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_ncg_hessp(self, use_wrapper=False):
        """ Newton conjugate gradient with Hessian times a vector p """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hessp=self.hessp,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess_p=self.hessp,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7)

    def test_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7)

    def test_l_bfgs_b_numjac(self):
        """ L-BFGS-B with numerical jacobian """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        approx_grad=True,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_l_bfgs_b_funjac(self):
        """ L-BFGS-B with combined objective function and jacobian """
        def fun(x):
            return self.func(x), self.grad(x)

        retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

    def test_minimize_l_bfgs_b(self):
        """ Minimize with L-BFGS-B method """
        opts = {'disp': False, 'maxiter': self.maxiter}
        r = optimize.minimize(self.func, self.startparams,
                              method='L-BFGS-B', jac=self.grad,
                              options=opts)
        assert_allclose(self.func(r.x), self.func(self.solution),
                        atol=1e-6)
        # approximate jacobian
        ra = optimize.minimize(self.func, self.startparams,
                               method='L-BFGS-B', options=opts)
        assert_allclose(self.func(ra.x), self.func(self.solution),
                        atol=1e-6)
        # check that function evaluations in approximate jacobian are counted
        assert_(ra.nfev > r.nfev)

    def test_minimize_l_bfgs_b_ftol(self):
        # Check that the `ftol` parameter in l_bfgs_b works as expected:
        # tightening the tolerance must monotonically improve the objective.
        v0 = None
        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
            sol = optimize.minimize(self.func, self.startparams,
                                    method='L-BFGS-B', jac=self.grad,
                                    options=opts)
            v = self.func(sol.x)

            if v0 is None:
                v0 = v
            else:
                assert_(v < v0)

            assert_allclose(v, self.func(self.solution), rtol=tol)

    def test_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
                    maxiter=100, callback=None, **options):
            bestx = x0
            besty = fun(x0)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for dim in range(np.size(x0)):
                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
                        testx = np.copy(bestx)
                        testx[dim] = s
                        testy = fun(testx, *args)
                        funcalls += 1
                        if testy < besty:
                            besty = testy
                            bestx = testx
                            improved = True
                    if callback is not None:
                        callback(bestx)
                    if maxfev is not None and funcalls >= maxfev:
                        stop = True
                        break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
        res = optimize.minimize(optimize.rosen, x0, method=custmin,
                                options=dict(stepsize=0.05))
        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)

    def test_minimize(self):
        """Tests for the minimize wrapper."""
        # Re-run each solver test through the minimize() interface; setUp is
        # called before each to reset the call counters and trace.
        self.setUp()
        self.test_bfgs(True)
        self.setUp()
        self.test_bfgs_infinite(True)
        self.setUp()
        self.test_cg(True)
        self.setUp()
        self.test_ncg(True)
        self.setUp()
        self.test_ncg_hess(True)
        self.setUp()
        self.test_ncg_hessp(True)
        self.setUp()
        self.test_neldermead(True)
        self.setUp()
        self.test_powell(True)
        self.setUp()
        self.test_custom()

    def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            if method in ('nelder-mead', 'powell', 'anneal', 'cobyla'):
                jac = None
            else:
                jac = dfunc

            with warnings.catch_warnings():
                # suppress deprecation warning for 'anneal'
                warnings.filterwarnings('ignore', category=DeprecationWarning)
                sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
                                         method=method)
                sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
                                         method=method)
            assert_(func(sol1.x) < func(sol2.x),
                    "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))

    def test_no_increase(self):
        # Check that the solver doesn't return a value worse than the
        # initial point.

        def func(x):
            return (x - 1)**2

        def bad_grad(x):
            # purposefully invalid gradient function, simulates a case
            # where line searches start failing
            return 2*(x - 1) * (-1) - 2

        def check(method):
            x0 = np.array([2.0])
            f0 = func(x0)
            jac = bad_grad
            if method in ['nelder-mead', 'powell', 'anneal', 'cobyla']:
                jac = None
            sol = optimize.minimize(func, x0, jac=jac, method=method,
                                    options=dict(maxiter=20))
            assert_equal(func(sol.x), sol.fun)
            # NOTE(review): applying knownfailureif to a fresh no-op lambda and
            # immediately calling it does not guard the assertion below; this
            # line looks vestigial -- confirm the intended known-failure
            # handling for slsqp.
            dec.knownfailureif(method == 'slsqp', "SLSQP returns slightly worse")(lambda: None)()
            assert_(func(sol.x) <= f0)

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            yield check, method

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            yield check, 'anneal'

    def test_slsqp_respect_bounds(self):
        # github issue 3108
        def f(x):
            return sum((x - np.array([1., 2., 3., 4.]))**2)

        def cons(x):
            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])

        x0 = np.array([0.5, 1., 1.5, 2.])
        res = optimize.minimize(f, x0, method='slsqp',
                                constraints={'type': 'ineq', 'fun': cons})
        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)

    def test_minimize_automethod(self):
        # minimize() must pick a suitable method automatically depending on
        # whether constraints and/or bounds are supplied.
        def f(x):
            return x**2

        def cons(x):
            return x - 2

        x0 = np.array([10.])
        sol_0 = optimize.minimize(f, x0)
        sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}])
        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
        sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)])
        sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)])
        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
            assert_(sol.success)
        assert_allclose(sol_0.x, 0, atol=1e-8)
        assert_allclose(sol_1.x, 2, atol=1e-8)
        assert_allclose(sol_2.x, 5, atol=1e-8)
        assert_allclose(sol_3.x, 5, atol=1e-8)
        assert_allclose(sol_4.x, 2, atol=1e-8)

    def test_minimize_coerce_args_param(self):
        # github issue #3503
        def Y(x, c):
            return np.sum((x-c)**2)

        def dY_dx(x, c=None):
            return 2*(x-c)
        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
        xinit = np.random.randn(len(c))
        # args=(c) is deliberately NOT a tuple: minimize must coerce it.
        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
class TestLBFGSBBounds(TestCase):
    """ Tests for L-BFGS-B with bounds """

    def setUp(self):
        # Lower bound of 1 on the first variable; second unconstrained.
        self.bounds = ((1, None), (None, None))
        self.solution = (1, 0)

    def fun(self, x, p=2.0):
        """Objective: sum of p-th powers of the coordinates, scaled by 1/p."""
        return 1.0 / p * (x[0]**p + x[1]**p)

    def jac(self, x, p=2.0):
        """Gradient of L-BFGS-B objective: elementwise x**(p - 1)."""
        return x**(p - 1)

    def fj(self, x, p=2.0):
        """Objective and gradient returned together from a single call."""
        return self.fun(x, p), self.jac(x, p)

    def _checkConverged(self, result):
        # Shared verification: solver converged and hit the known solution.
        x, f, d = result
        assert_(d['warnflag'] == 0, d['task'])
        assert_allclose(x, self.solution, atol=1e-6)

    def test_l_bfgs_b_bounds(self):
        """ L-BFGS-B with bounds """
        self._checkConverged(optimize.fmin_l_bfgs_b(self.fun, [0, -1],
                                                    fprime=self.jac,
                                                    bounds=self.bounds))

    def test_l_bfgs_b_funjac(self):
        """ L-BFGS-B with fun and jac combined and extra arguments """
        self._checkConverged(optimize.fmin_l_bfgs_b(self.fj, [0, -1],
                                                    args=(2.0, ),
                                                    bounds=self.bounds))

    def test_minimize_l_bfgs_b_bounds(self):
        """ Minimize with method='L-BFGS-B' with bounds """
        res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
                                jac=self.jac, bounds=self.bounds)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar(TestCase):
"""Tests for scalar optimizers"""
def setUp(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
""" brent algorithm """
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
""" golden algorithm """
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_fminbound(self):
"""Test fminbound """
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
try:
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
self.fail("exception not raised")
except ValueError as e:
assert_('must be scalar' in str(e))
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar_custom(self):
# This function comes from the documentation example.
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = (bracket[1] + bracket[0]) / 2.0
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for testx in [bestx - stepsize, bestx + stepsize]:
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, self.solution, atol=1e-6)
def test_minimize_scalar_coerce_args_param(self):
# github issue #3503
optimize.minimize_scalar(self.fun, args=1.5)
class TestNewtonCg(object):
def test_rosenbrock(self):
x0 = np.array([-1.2, 1.0])
sol = optimize.minimize(optimize.rosen, x0,
jac=optimize.rosen_der,
hess=optimize.rosen_hess,
tol=1e-5,
method='Newton-CG')
assert_(sol.success, sol.message)
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_himmelblau(self):
x0 = np.array(himmelblau_x0)
sol = optimize.minimize(himmelblau,
x0,
jac=himmelblau_grad,
hess=himmelblau_hess,
method='Newton-CG',
tol=1e-6)
assert_(sol.success, sol.message)
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
class TestRosen(TestCase):

    def test_hess(self):
        """Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)"""
        point = np.array([3, 4, 5])
        direction = np.array([2, 2, 2])
        via_product = optimize.rosen_hess_prod(point, direction)
        via_matrix = np.dot(optimize.rosen_hess(point), direction)
        assert_equal(via_product, via_matrix)
def himmelblau(p):
    """
    R^2 -> R^1 test function for optimization. The function has four local
    minima where himmelblau(xopt) == 0.
    """
    x, y = p
    first = x*x + y - 11
    second = x + y*y - 7
    return first*first + second*second
def himmelblau_grad(p):
    """Analytic gradient of the Himmelblau function at point C{p}."""
    x, y = p
    df_dx = 4*x**3 + 4*x*y - 42*x + 2*y**2 - 14
    df_dy = 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22
    return np.array([df_dx, df_dy])
def himmelblau_hess(p):
    """Analytic Hessian of the Himmelblau function at point C{p}."""
    x, y = p
    d2f_dxdx = 12*x**2 + 4*y - 42
    d2f_dxdy = 4*x + 4*y
    d2f_dydy = 4*x + 12*y**2 - 26
    return np.array([[d2f_dxdx, d2f_dxdy],
                     [d2f_dxdy, d2f_dydy]])
# Standard starting point, optimum location, and optimal value for the
# Himmelblau test function, used by TestNewtonCg.test_himmelblau.
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
def test_minimize_multiple_constraints():
    # Regression test for gh-4240.
    def budget(x):
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def second_nonnegative(x):
        return np.array([x[1]])

    def third_nonnegative(x):
        return np.array([x[2]])

    constraints = ({'type': 'ineq', 'fun': budget},
                   {'type': 'ineq', 'fun': second_nonnegative},
                   {'type': 'ineq', 'fun': third_nonnegative})

    def objective(x):
        # Maximize x0 + x1 + x2 by minimizing its negation.
        return -1 * (x[0] + x[1] + x[2])

    res = optimize.minimize(objective, [0, 0, 0], method='SLSQP',
                            constraints=constraints)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)
if __name__ == "__main__":
    # Allow executing this test module directly (outside a test runner).
    run_module_suite()
| mit |
EvanK/ansible | lib/ansible/modules/remote_management/ucs/ucs_disk_group_policy.py | 21 | 16473 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc.  Fixes applied: typo
# "Configuraiton" and strip_size choices (previously copy-pasted as
# [present, absent]; must be the strip sizes from the argument spec).
DOCUMENTATION = r'''
---
module: ucs_disk_group_policy
short_description: Configures disk group policies on Cisco UCS Manager
description:
- Configures disk group policies on Cisco UCS Manager.
- Examples can be used with the L(UCS Platform Emulator,https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
  state:
    description:
    - Desired state of the disk group policy.
    - If C(present), will verify that the disk group policy is present and will create if needed.
    - If C(absent), will verify that the disk group policy is absent and will delete if needed.
    choices: [present, absent]
    default: present
  name:
    description:
    - The name of the disk group policy.
      This name can be between 1 and 16 alphanumeric characters.
    - "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
    - You cannot change this name after the policy is created.
    required: yes
  description:
    description:
    - The user-defined description of the storage profile.
      Enter up to 256 characters.
      "You can use any characters or spaces except the following:"
      "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
    aliases: [ descr ]
  raid_level:
    description:
    - "The RAID level for the disk group policy. This can be one of the following:"
    - "stripe - UCS Manager shows RAID 0 Striped"
    - "mirror - RAID 1 Mirrored"
    - "mirror-stripe - RAID 10 Mirrored and Striped"
    - "stripe-parity - RAID 5 Striped Parity"
    - "stripe-dual-parity - RAID 6 Striped Dual Parity"
    - "stripe-parity-stripe - RAID 50 Striped Parity and Striped"
    - "stripe-dual-parity-stripe - RAID 60 Striped Dual Parity and Striped"
    choices: [stripe, mirror, mirror-stripe, stripe-parity, stripe-dual-parity, stripe-parity-stripe, stripe-dual-parity-stripe]
    default: stripe
  configuration_mode:
    description:
    - "Disk group configuration mode. Choose one of the following:"
    - "automatic - Automatically configures the disks in the disk group."
    - "manual - Enables you to manually configure the disks in the disk group."
    choices: [automatic, manual]
    default: automatic
  num_drives:
    description:
    - Specify the number of drives for the disk group.
    - This can be from 0 to 24.
    - Option only applies when configuration mode is automatic.
    default: 1
  drive_type:
    description:
    - Specify the drive type to use in the drive group.
    - "This can be one of the following:"
    - "unspecified — Selects the first available drive type, and applies that to all drives in the group."
    - "HDD — Hard disk drive"
    - "SSD — Solid state drive"
    - Option only applies when configuration mode is automatic.
    choices: [unspecified, HDD, SSD]
    default: unspecified
  num_ded_hot_spares:
    description:
    - Specify the number of hot spares for the disk group.
    - This can be from 0 to 24.
    - Option only applies when configuration mode is automatic.
    default: unspecified
  num_glob_hot_spares:
    description:
    - Specify the number of global hot spares for the disk group.
    - This can be from 0 to 24.
    - Option only applies when configuration mode is automatic.
    default: unspecified
  min_drive_size:
    description:
    - Specify the minimum drive size or unspecified to allow all drive sizes.
    - This can be from 0 to 10240 GB.
    - Option only applies when configuration mode is automatic.
    default: 'unspecified'
  use_remaining_disks:
    description:
    - Specifies whether you can use all the remaining disks in the disk group or not.
    - Option only applies when configuration mode is automatic.
    choices: ['yes', 'no']
    default: 'no'
  manual_disks:
    description:
    - List of manually configured disks.
    - Options are only used when you choose manual configuration_mode.
    suboptions:
      name:
        description:
        - The name of the local LUN.
        required: yes
      slot_num:
        description:
        - The slot number of the specific disk.
      role:
        description:
        - "The role of the disk. This can be one of the following:"
        - "normal - Normal"
        - "ded-hot-spare - Dedicated Hot Spare"
        - "glob-hot-spare - Glob Hot Spare"
      span_id:
        description:
        - The Span ID of the specific disk.
        default: 'unspecified'
      state:
        description:
        - If C(present), will verify disk slot is configured within policy.
          If C(absent), will verify disk slot is absent from policy.
        choices: [ present, absent ]
        default: present
  virtual_drive:
    description:
    - Configuration of virtual drive options.
    suboptions:
      access_policy:
        description:
        - Configure access policy to virtual drive.
        choices: [blocked, hidden, platform-default, read-only, read-write, transport-ready]
        default: platform-default
      drive_cache:
        description:
        - Configure drive caching.
        choices: [disable, enable, no-change, platform-default]
        default: platform-default
      io_policy:
        description:
        - Direct or Cached IO path.
        choices: [cached, direct, platform-default]
        default: platform-default
      read_policy:
        description:
        - Read access policy to virtual drive.
        choices: [normal, platform-default, read-ahead]
        default: platform-default
      strip_size:
        description:
        - Virtual drive strip size.
        choices: [1024KB, 128KB, 16KB, 256KB, 32KB, 512KB, 64KB, 8KB, platform-default]
        default: platform-default
      write_cache_policy:
        description:
        - Write back cache policy.
        choices: [always-write-back, platform-default, write-back-good-bbu, write-through]
        default: platform-default
  org_dn:
    description:
    - The distinguished name (dn) of the organization where the resource is assigned.
    default: org-root
requirements:
- ucsmsdk
author:
- Sindhu Sudhir (@sisudhir)
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
- Brett Johnson (@sdbrett)
version_added: '2.8'
'''
EXAMPLES = r'''
- name: Configure Disk Group Policy
ucs_disk_group_policy:
hostname: 172.16.143.150
username: admin
password: password
name: DEE-DG
raid_level: mirror
configuration_mode: manual
manual_disks:
- slot_num: '1'
role: normal
- slot_num: '2'
role: normal
- name: Remove Disk Group Policy
ucs_disk_group_policy:
name: DEE-DG
hostname: 172.16.143.150
username: admin
password: password
state: absent
- name: Remove Disk from Policy
ucs_disk_group_policy:
hostname: 172.16.143.150
username: admin
password: password
name: DEE-DG
description: Testing Ansible
raid_level: stripe
configuration_mode: manual
manual_disks:
- slot_num: '1'
role: normal
- slot_num: '2'
role: normal
state: absent
virtual_drive:
access_policy: platform-default
io_policy: direct
strip_size: 64KB
'''
# This module registers no custom return values beyond the common ones.
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def configure_disk_policy(ucs, module, dn):
    """Create or update the disk group policy identified by dn.

    ucs:    UCSModule wrapper holding the login handle and the result dict
    module: AnsibleModule carrying the validated parameters
    dn:     distinguished name of the LstorageDiskGroupConfigPolicy

    Always sets ucs.result['changed'] = True (also in check mode, where no
    API calls are made); on SDK errors the module fails with a message.
    """
    from ucsmsdk.mometa.lstorage.LstorageDiskGroupConfigPolicy import LstorageDiskGroupConfigPolicy
    from ucsmsdk.mometa.lstorage.LstorageDiskGroupQualifier import LstorageDiskGroupQualifier
    from ucsmsdk.mometa.lstorage.LstorageLocalDiskConfigRef import LstorageLocalDiskConfigRef

    if not module.check_mode:
        try:
            # create if mo does not already exist
            mo = LstorageDiskGroupConfigPolicy(
                parent_mo_or_dn=module.params['org_dn'],
                name=module.params['name'],
                descr=module.params['description'],
                raid_level=module.params['raid_level'],
            )

            if module.params['configuration_mode'] == 'automatic':
                # Automatic mode: one qualifier child describes the drives.
                LstorageDiskGroupQualifier(
                    parent_mo_or_dn=mo,
                    num_drives=module.params['num_drives'],
                    drive_type=module.params['drive_type'],
                    use_remaining_disks=module.params['use_remaining_disks'],
                    num_ded_hot_spares=module.params['num_ded_hot_spares'],
                    num_glob_hot_spares=module.params['num_glob_hot_spares'],
                    min_drive_size=module.params['min_drive_size'],
                )
            else:  # configuration_mode == 'manual'
                for disk in module.params['manual_disks']:
                    if disk['state'] == 'absent':
                        # Remove an existing slot reference from the policy.
                        child_dn = dn + '/slot-' + disk['slot_num']
                        mo_1 = ucs.login_handle.query_dn(child_dn)
                        if mo_1:
                            ucs.login_handle.remove_mo(mo_1)
                    else:  # state == 'present'
                        # Add/refresh the slot reference as a child of mo.
                        LstorageLocalDiskConfigRef(
                            parent_mo_or_dn=mo,
                            slot_num=disk['slot_num'],
                            role=disk['role'],
                            span_id=disk['span_id'],
                        )

            if module.params['virtual_drive']:
                _configure_virtual_drive(module, mo)

            # modify_present=True: update mo in place if it already exists.
            ucs.login_handle.add_mo(mo, True)
            ucs.login_handle.commit()
        except Exception as e:  # generic Exception handling because SDK can throw a variety
            ucs.result['msg'] = "setup error: %s " % str(e)
            module.fail_json(**ucs.result)

    # Reported even in check mode: the policy would be (re)configured.
    ucs.result['changed'] = True
def check_disk_policy_props(ucs, module, mo, dn):
    """Return True if the existing policy mo already matches the desired state.

    Compares the top-level policy properties and then, depending on the
    configuration mode, either the disk group qualifier child object or every
    manually specified disk slot, plus the optional virtual drive child.

    Fixes over the previous version: a manual disk with state=present whose
    slot child did not exist yet (and an automatic config whose qualifier
    child was missing) used to be treated as a match, so the module skipped
    the configuration step and never converged.
    """
    props_match = True

    # check top-level mo props
    kwargs = dict(descr=module.params['description'])
    kwargs['raid_level'] = module.params['raid_level']
    if mo.check_prop_match(**kwargs):
        # top-level props match, check next level mo/props
        if module.params['configuration_mode'] == 'automatic':
            child_dn = dn + '/disk-group-qual'
            mo_1 = ucs.login_handle.query_dn(child_dn)
            if mo_1:
                kwargs = dict(num_drives=module.params['num_drives'])
                kwargs['drive_type'] = module.params['drive_type']
                kwargs['use_remaining_disks'] = module.params['use_remaining_disks']
                kwargs['num_ded_hot_spares'] = module.params['num_ded_hot_spares']
                kwargs['num_glob_hot_spares'] = module.params['num_glob_hot_spares']
                kwargs['min_drive_size'] = module.params['min_drive_size']
                props_match = mo_1.check_prop_match(**kwargs)
            else:
                # Qualifier child is missing, so the desired automatic
                # configuration cannot be in place yet.
                props_match = False
        else:  # configuration_mode == 'manual'
            for disk in module.params['manual_disks']:
                child_dn = dn + '/slot-' + disk['slot_num']
                mo_1 = ucs.login_handle.query_dn(child_dn)
                if mo_1:
                    if disk['state'] == 'absent':
                        # Slot exists but should be removed.
                        props_match = False
                        break
                    # state == 'present': verify the slot's properties.
                    kwargs = dict(slot_num=disk['slot_num'])
                    kwargs['role'] = disk['role']
                    kwargs['span_id'] = disk['span_id']
                    if not mo_1.check_prop_match(**kwargs):
                        props_match = False
                        break
                elif disk['state'] == 'present':
                    # Desired slot is not configured yet.
                    props_match = False
                    break

        if props_match and module.params['virtual_drive']:
            props_match = check_virtual_drive_props(ucs, module, dn)
    else:
        props_match = False

    return props_match
def check_virtual_drive_props(ucs, module, dn):
    """Return True if the existing virtual-drive-def child matches the params.

    Fix: previously an AttributeError was raised when the policy had no
    virtual-drive-def child (query_dn returns None); a missing child now
    correctly reports a mismatch so the module reconfigures the policy.
    """
    child_dn = dn + '/virtual-drive-def'
    mo_1 = ucs.login_handle.query_dn(child_dn)
    if not mo_1:
        # Child object missing: desired virtual drive settings cannot match.
        return False
    return mo_1.check_prop_match(**module.params['virtual_drive'])
def _configure_virtual_drive(module, mo):
    """Attach a virtual drive definition child object to the policy mo."""
    from ucsmsdk.mometa.lstorage.LstorageVirtualDriveDef import LstorageVirtualDriveDef

    vd_props = module.params['virtual_drive']
    LstorageVirtualDriveDef(parent_mo_or_dn=mo, **vd_props)
def _virtual_drive_argument_spec():
return dict(
access_policy=dict(type='str', default='platform-default',
choices=["blocked", "hidden", "platform-default", "read-only", "read-write",
"transport-ready"]),
drive_cache=dict(type='str', default='platform-default',
choices=["disable", "enable", "no-change", "platform-default"]),
io_policy=dict(type='str', default='platform-default',
choices=["cached", "direct", "platform-default"]),
read_policy=dict(type='str', default='platform-default',
choices=["normal", "platform-default", "read-ahead"]),
strip_size=dict(type='str', default='platform-default',
choices=["1024KB", "128KB", "16KB", "256KB", "32KB", "512KB", "64KB", "8KB",
"platform-default"]),
write_cache_policy=dict(type='str', default='platform-default',
choices=["always-write-back", "platform-default", "write-back-good-bbu",
"write-through"]),
)
def main():
    """Module entry point: build the argument spec, connect to UCS Manager
    and converge the disk group policy to the requested state."""
    # Sub-spec for entries of the manual_disks list option.
    manual_disk = dict(
        slot_num=dict(type='str', required=True),
        role=dict(type='str', default='normal', choices=['normal', 'ded-hot-spare', 'glob-hot-spare']),
        span_id=dict(type='str', default='unspecified'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

    # Fix: copy the shared base spec instead of mutating it in place --
    # update() on ucs_argument_spec itself would leak this module's options
    # into every other importer of the shared dict.
    argument_spec = dict(ucs_argument_spec)
    argument_spec.update(
        org_dn=dict(type='str', default='org-root'),
        name=dict(type='str', required=True),
        description=dict(type='str', aliases=['descr'], default=''),
        raid_level=dict(
            type='str',
            default='stripe',
            choices=[
                'stripe',
                'mirror',
                'mirror-stripe',
                'stripe-parity',
                'stripe-dual-parity',
                'stripe-parity-stripe',
                'stripe-dual-parity-stripe',
            ],
        ),
        num_drives=dict(type='str', default='1'),
        configuration_mode=dict(type='str', default='automatic', choices=['automatic', 'manual']),
        num_ded_hot_spares=dict(type='str', default='unspecified'),
        num_glob_hot_spares=dict(type='str', default='unspecified'),
        drive_type=dict(type='str', default='unspecified', choices=['unspecified', 'HDD', 'SSD']),
        use_remaining_disks=dict(type='str', default='no', choices=['yes', 'no']),
        min_drive_size=dict(type='str', default='unspecified'),
        manual_disks=dict(type='list', elements='dict', options=manual_disk),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        virtual_drive=dict(type='dict', options=_virtual_drive_argument_spec()),
    )

    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
    )
    ucs = UCSModule(module)
    # UCSModule creation above verifies ucsmsdk is present and exits on failure.
    # Additional imports are done below or in called functions.

    ucs.result['changed'] = False
    props_match = False

    # dn is <org_dn>/disk-group-config-<name>
    dn = module.params['org_dn'] + '/disk-group-config-' + module.params['name']
    mo = ucs.login_handle.query_dn(dn)
    if mo:
        if module.params['state'] == 'absent':
            # mo must exist but all properties do not have to match
            if not module.check_mode:
                ucs.login_handle.remove_mo(mo)
                ucs.login_handle.commit()
            ucs.result['changed'] = True
        else:  # state == 'present'
            props_match = check_disk_policy_props(ucs, module, mo, dn)

    if module.params['state'] == 'present' and not props_match:
        configure_disk_policy(ucs, module, dn)

    module.exit_json(**ucs.result)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
michaelhowden/eden | modules/unit_tests/s3/s3model.py | 14 | 8372 | # -*- coding: utf-8 -*-
#
# S3 Model Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3model.py
#
import unittest
from gluon import current
from s3.s3fields import s3_meta_fields
# =============================================================================
class S3ModelTests(unittest.TestCase):
    """Placeholder for generic s3db model tests (none implemented yet)."""
    pass
# =============================================================================
class S3SuperEntityTests(unittest.TestCase):
    """Tests for super-entity linking and the ondelete behavior of
    s3db.delete_super (CASCADE, SET NULL and RESTRICT constraints)."""

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """Define a test super-entity, an instance (master) table and one
        component table per ondelete constraint."""
        s3db = current.s3db

        # Create super-entity
        s3db.super_entity("setest_super",
                          "se_id",
                          {"setest_master": "SE Test Master"})

        # Add components to the SE
        s3db.add_components("setest_super",
                            setest_component_cascade="se_id",
                            setest_component_setnull="se_id",
                            setest_component_restrict="se_id",
                            )

        # Define master table
        s3db.define_table("setest_master",
                          s3db.super_link("se_id", "setest_super"),
                          *s3_meta_fields())

        # Make instance
        s3db.configure("setest_master",
                       super_entity = "setest_super")

        # Define component tables with constraints
        s3db.define_table("setest_component_cascade",
                          s3db.super_link("se_id", "setest_super",
                                          ondelete="CASCADE"),
                          *s3_meta_fields())
        s3db.define_table("setest_component_setnull",
                          s3db.super_link("se_id", "setest_super",
                                          ondelete="SET NULL"),
                          *s3_meta_fields())
        s3db.define_table("setest_component_restrict",
                          s3db.super_link("se_id", "setest_super",
                                          ondelete="RESTRICT"),
                          *s3_meta_fields())
        current.db.commit()

    # -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):
        """Drop all tables created by setUpClass."""
        db = current.db

        # Drop all test tables
        db.setest_component_cascade.drop()
        db.setest_component_setnull.drop()
        db.setest_component_restrict.drop()
        db.setest_master.drop()
        db.setest_super.drop()
        current.db.commit()

    # -------------------------------------------------------------------------
    def setUp(self):
        """Create a fresh master record linked to the super-entity, with
        auth override so no permission checks interfere."""
        s3db = current.s3db

        # Create the master record and link it to the SE
        master_table = s3db.setest_master
        master_id = master_table.insert()
        s3db.update_super(master_table, {"id": master_id})
        self.master_id = master_id

        current.auth.override = True

    # -------------------------------------------------------------------------
    def tearDown(self):
        """Roll back all changes of the test and restore auth behavior."""
        current.db.rollback()
        current.auth.override = False

    # -------------------------------------------------------------------------
    def testDeleteSuper(self):
        """ Test delete_super without constraints """
        s3db = current.s3db

        # Get the master record
        master_table = s3db.setest_master
        record = master_table[self.master_id]
        se_id = record.se_id

        # Try delete the super-record (returns True)
        success = s3db.delete_super(master_table, record)
        self.assertTrue(success)

        # Super-key is removed
        record = master_table[self.master_id]
        self.assertEqual(record.se_id, None)

        # Super-record is deleted
        super_table = s3db.setest_super
        super_record = super_table[se_id]
        self.assertTrue(super_record.deleted)

    # -------------------------------------------------------------------------
    def testDeleteSuperCascade(self):
        """ Test delete_super with CASCADE constraint """
        s3db = current.s3db

        # Get the master record
        master_table = s3db.setest_master
        record = master_table[self.master_id]
        se_id = record.se_id

        # Create a component record
        component_table = s3db.setest_component_cascade
        component_id = component_table.insert(se_id=se_id)
        component_record = component_table[component_id]
        self.assertNotEqual(component_record, None)

        # Try delete the super-record (returns True)
        success = s3db.delete_super(master_table, record)
        self.assertTrue(success)

        # Super-key is removed
        record = master_table[self.master_id]
        self.assertEqual(record.se_id, None)

        # Component record is deleted
        component_record = component_table[component_id]
        self.assertTrue(component_record.deleted)
        self.assertEqual(component_record.se_id, None)

        # Super-record is deleted
        super_table = s3db.setest_super
        super_record = super_table[se_id]
        self.assertTrue(super_record.deleted)

    # -------------------------------------------------------------------------
    def testDeleteSuperSetNull(self):
        """ Test delete_super with SET NULL constraint """
        s3db = current.s3db

        # Get the master record
        master_table = s3db.setest_master
        record = master_table[self.master_id]
        se_id = record.se_id

        # Create a component record
        component_table = s3db.setest_component_setnull
        component_id = component_table.insert(se_id=se_id)
        component_record = component_table[component_id]
        self.assertNotEqual(component_record, None)

        # Try delete the super-record (returns True)
        success = s3db.delete_super(master_table, record)
        self.assertTrue(success)

        # Super-key is removed
        record = master_table[self.master_id]
        self.assertEqual(record.se_id, None)

        # Component record is not deleted, but unlinked
        component_record = component_table[component_id]
        self.assertFalse(component_record.deleted)
        self.assertEqual(component_record.se_id, None)

        # Super-record is deleted
        super_table = s3db.setest_super
        super_record = super_table[se_id]
        self.assertTrue(super_record.deleted)

    # -------------------------------------------------------------------------
    def testDeleteSuperRestrict(self):
        """ Test delete_super with RESTRICT constraint """
        s3db = current.s3db

        # Get the master record
        master_table = s3db.setest_master
        record = master_table[self.master_id]
        se_id = record.se_id

        # Create a component record
        component_table = s3db.setest_component_restrict
        component_id = component_table.insert(se_id=se_id)
        component_record = component_table[component_id]
        self.assertNotEqual(component_record, None)

        # Try delete the super-record (must return False)
        success = s3db.delete_super(master_table, record)
        self.assertFalse(success)

        # Super-key is retained
        record = master_table[self.master_id]
        self.assertEqual(record.se_id, se_id)

        # Component record is not deleted and still linked
        component_record = component_table[component_id]
        self.assertFalse(component_record.deleted)
        self.assertEqual(component_record.se_id, se_id)

        # Super-record is not deleted
        super_table = s3db.setest_super
        super_record = super_table[se_id]
        self.assertFalse(super_record.deleted)
# =============================================================================
def run_suite(*test_classes):
    """Load the given TestCase classes and run them with a verbose text runner."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        loader.loadTestsFromTestCase(cls) for cls in test_classes
    )
    unittest.TextTestRunner(verbosity=2).run(suite)
    return None
if __name__ == "__main__":
    # Execute the suite when this file is run directly under web2py.
    run_suite(
        #S3ModelTests,
        S3SuperEntityTests,
    )
# END ========================================================================
| mit |
pvreman/cobbler | koan/sub_process.py | 56 | 39931 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*args, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the childs point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys

# Platform switch: Windows uses Win32 handles/CreateProcess, POSIX fork/exec.
mswindows = (sys.platform == "win32")

import os
import types
import traceback

if mswindows:
    import threading
    import msvcrt
    if 0: # <-- change this to use pywin32 instead of the _subprocess driver
        import pywintypes
        from win32api import GetStdHandle, STD_INPUT_HANDLE, \
             STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
        from win32api import GetCurrentProcess, DuplicateHandle, \
             GetModuleFileName, GetVersion
        from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
        from win32pipe import CreatePipe
        from win32process import CreateProcess, STARTUPINFO, \
             GetExitCodeProcess, STARTF_USESTDHANDLES, \
             STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
        from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
    else:
        from _subprocess import *
        # Minimal stand-ins for the pywin32 types when using _subprocess.
        class STARTUPINFO:
            dwFlags = 0
            hStdInput = None
            hStdOutput = None
            hStdError = None
        class pywintypes:
            error = IOError
else:
    import select
    import errno
    import fcntl
    import pickle
# Public API of this vendored subprocess module.
__all__ = ["Popen", "PIPE", "STDOUT", "call"]

try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    # Fallback when SC_OPEN_MAX is unavailable (e.g. non-POSIX platforms).
    MAXFD = 256

# True/False does not exist on 2.2.0
try:
    False
except NameError:
    False = 0
    True = 1
_active = []
def _cleanup():
for inst in _active[:]:
inst.poll()
PIPE = -1    # sentinel: create a new pipe for the child's stream
STDOUT = -2  # sentinel: redirect the child's stderr into its stdout
def call(*args, **kwargs):
    """Run command with arguments.  Wait for command to complete, then
    return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    process = Popen(*args, **kwargs)
    return process.wait()
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:
    1) Arguments are delimited by white space, which is either a
       space or a tab.
    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.
    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []
        # Add a space to separate this argument from the others
        if result:
            result.append(' ')
        # BUGFIX: an empty argument must be rendered as "" or it would
        # simply vanish from the command line (matches later CPython).
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')
        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double preceding backslashes, then add an escaped quote.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)
        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)
        if needquote:
            # Backslashes before the closing quote must be doubled.
            result.extend(bs_buf)
            result.append('"')
    return ''.join(result)
class Popen(object):
    """Execute a child program in a new process.

    Early standalone version of subprocess.Popen (Python 2 syntax).
    On Windows the child is started through the win32 CreateProcess API
    (or the _subprocess driver); on POSIX through fork/exec, with exec
    failures pickled back to the parent over a dedicated pipe.
    """
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance."""
        _cleanup()
        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")
        if mswindows:
            # Reject POSIX-only options up front.
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds:
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms")
        else:
            # POSIX: reject Windows-only options.
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines
        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.
        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)
        self._execute_child(args, executable, preexec_fn, close_fds,
                            cwd, env, universal_newlines,
                            startupinfo, creationflags, shell,
                            p2cread, p2cwrite,
                            c2pread, c2pwrite,
                            errread, errwrite)
        if p2cwrite:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)
        # Register with the module-level list so _cleanup() can reap us.
        _active.append(self)
    def _translate_newlines(self, data):
        # Normalize \r\n and lone \r to \n (universal_newlines support).
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data
    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin == None and stdout == None and stderr == None:
                return (None, None, None, None, None, None)
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None
            if stdin == None:
                p2cread = GetStdHandle(STD_INPUT_HANDLE)
            elif stdin == PIPE:
                p2cread, p2cwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                p2cwrite = p2cwrite.Detach()
                p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
            elif type(stdin) == types.IntType:
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)
            if stdout == None:
                c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
            elif stdout == PIPE:
                c2pread, c2pwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                c2pread = c2pread.Detach()
                c2pread = msvcrt.open_osfhandle(c2pread, 0)
            elif type(stdout) == types.IntType:
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)
            if stderr == None:
                errwrite = GetStdHandle(STD_ERROR_HANDLE)
            elif stderr == PIPE:
                errread, errwrite = CreatePipe(None, 0)
                # Detach and turn into fd
                errread = errread.Detach()
                errread = msvcrt.open_osfhandle(errread, 0)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)
            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            return DuplicateHandle(GetCurrentProcess(), handle,
                                   GetCurrentProcess(), 0, 1,
                                   DUPLICATE_SAME_ACCESS)
        def _find_w9xpopen(self):
            """Find and return absolut path to w9xpopen.exe"""
            w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
                                    "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)"""
            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)
            # Process startup details
            default_startupinfo = STARTUPINFO()
            if startupinfo == None:
                startupinfo = default_startupinfo
            if not None in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite
            if shell:
                default_startupinfo.dwFlags |= STARTF_USESHOWWINDOW
                default_startupinfo.wShowWindow = SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + args
                if (GetVersion() >= 0x80000000L or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= CREATE_NEW_CONSOLE
            # Start the process
            try:
                hp, ht, pid, tid = CreateProcess(executable, args,
                                                 # no special security
                                                 None, None,
                                                 # must inherit handles to pass std
                                                 # handles
                                                 1,
                                                 creationflags,
                                                 env,
                                                 cwd,
                                                 startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or simliar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            # Retain the process handle, but close the thread handle
            self._handle = hp
            self.pid = pid
            ht.Close()
            # Child is launched. Close the parent's copy of those pipe
            # handles that only the child should have open.  You need
            # to make sure that no handles to the write end of the
            # output pipe are maintained in this process or else the
            # pipe will not close when the child process exits and the
            # ReadFile will hang.
            if p2cread != None:
                p2cread.Close()
            if c2pwrite != None:
                c2pwrite.Close()
            if errwrite != None:
                errwrite.Close()
        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                    self.returncode = GetExitCodeProcess(self._handle)
                    _active.remove(self)
            return self.returncode
        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                obj = WaitForSingleObject(self._handle, INFINITE)
                self.returncode = GetExitCodeProcess(self._handle)
                _active.remove(self)
            return self.returncode
        def _readerthread(self, fh, buffer):
            # Worker for communicate(): drain a pipe into the shared list.
            buffer.append(fh.read())
        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.
            communicate() returns a tuple (stdout, stderr)."""
            stdout = None # Return
            stderr = None # Return
            # Reader threads avoid deadlock when both pipes fill up.
            if self.stdout:
                stdout = []
                stdout_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stdout, stdout))
                stdout_thread.setDaemon(True)
                stdout_thread.start()
            if self.stderr:
                stderr = []
                stderr_thread = threading.Thread(target=self._readerthread,
                                                 args=(self.stderr, stderr))
                stderr_thread.setDaemon(True)
                stderr_thread.start()
            if self.stdin:
                if input != None:
                    self.stdin.write(input)
                self.stdin.close()
            if self.stdout:
                stdout_thread.join()
            if self.stderr:
                stderr_thread.join()
            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = stdout[0]
            if stderr != None:
                stderr = stderr[0]
            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)
            self.wait()
            return (stdout, stderr)
    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None
            if stdin == None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = os.pipe()
            elif type(stdin) == types.IntType:
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()
            if stdout == None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = os.pipe()
            elif type(stdout) == types.IntType:
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()
            if stderr == None:
                pass
            elif stderr == PIPE:
                errread, errwrite = os.pipe()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif type(stderr) == types.IntType:
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()
            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
        def _set_cloexec_flag(self, fd):
            # Mark fd close-on-exec so it doesn't leak into the child.
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                cloexec_flag = 1
            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
        def _close_fds(self, but):
            # Close every fd >= 3 except `but` (the errpipe write end).
            for i in range(3, MAXFD):
                if i == but:
                    continue
                try:
                    os.close(i)
                except:
                    pass
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (POSIX version)"""
            if isinstance(args, types.StringTypes):
                args = [args]
            if shell:
                args = ["/bin/sh", "-c"] + args
            if executable == None:
                executable = args[0]
            # For transferring possible exec failure from child to parent
            # The first char specifies the exception type: 0 means
            # OSError, 1 means some other error.
            errpipe_read, errpipe_write = os.pipe()
            self._set_cloexec_flag(errpipe_write)
            self.pid = os.fork()
            if self.pid == 0:
                # Child
                try:
                    # Close parent's pipe ends
                    if p2cwrite:
                        os.close(p2cwrite)
                    if c2pread:
                        os.close(c2pread)
                    if errread:
                        os.close(errread)
                    os.close(errpipe_read)
                    # Dup fds for child
                    if p2cread:
                        os.dup2(p2cread, 0)
                    if c2pwrite:
                        os.dup2(c2pwrite, 1)
                    if errwrite:
                        os.dup2(errwrite, 2)
                    # Close pipe fds.  Make sure we doesn't close the same
                    # fd more than once.
                    if p2cread:
                        os.close(p2cread)
                    if c2pwrite and c2pwrite not in (p2cread,):
                        os.close(c2pwrite)
                    if errwrite and errwrite not in (p2cread, c2pwrite):
                        os.close(errwrite)
                    # Close all other fds, if asked for
                    if close_fds:
                        self._close_fds(but=errpipe_write)
                    if cwd != None:
                        os.chdir(cwd)
                    if preexec_fn:
                        apply(preexec_fn)
                    if env == None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)
                except:
                    exc_type, exc_value, tb = sys.exc_info()
                    # Save the traceback and attach it to the exception object
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value,
                                                           tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))
                # This exitcode won't be reported to applications, so it
                # really doesn't matter what we return.
                os._exit(255)
            # Parent
            os.close(errpipe_write)
            if p2cread and p2cwrite:
                os.close(p2cread)
            if c2pwrite and c2pread:
                os.close(c2pwrite)
            if errwrite and errread:
                os.close(errwrite)
            # Wait for exec to fail or succeed; possibly raising exception
            data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
            os.close(errpipe_read)
            if data != "":
                # Child wrote a pickled exception before _exit: re-raise here.
                os.waitpid(self.pid, 0)
                child_exception = pickle.loads(data)
                raise child_exception
        def _handle_exitstatus(self, sts):
            # Decode waitpid() status into the returncode convention
            # (negative signal number, or plain exit status).
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")
            _active.remove(self)
        def poll(self):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode == None:
                try:
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    pass
            return self.returncode
        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode == None:
                pid, sts = os.waitpid(self.pid, 0)
                self._handle_exitstatus(sts)
            return self.returncode
        def communicate(self, input=None):
            """Interact with process: Send data to stdin.  Read data from
            stdout and stderr, until end-of-file is reached.  Wait for
            process to terminate.  The optional input argument should be a
            string to be sent to the child process, or None, if no data
            should be sent to the child.
            communicate() returns a tuple (stdout, stderr)."""
            read_set = []
            write_set = []
            stdout = None # Return
            stderr = None # Return
            if self.stdin:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if input:
                    write_set.append(self.stdin)
                else:
                    self.stdin.close()
            if self.stdout:
                read_set.append(self.stdout)
                stdout = []
            if self.stderr:
                read_set.append(self.stderr)
                stderr = []
            # Multiplex over the pipes with select() to avoid deadlock.
            while read_set or write_set:
                rlist, wlist, xlist = select.select(read_set, write_set, [])
                if self.stdin in wlist:
                    # When select has indicated that the file is writable,
                    # we can write up to PIPE_BUF bytes without risk
                    # blocking.  POSIX defines PIPE_BUF >= 512
                    bytes_written = os.write(self.stdin.fileno(), input[:512])
                    input = input[bytes_written:]
                    if not input:
                        self.stdin.close()
                        write_set.remove(self.stdin)
                if self.stdout in rlist:
                    data = os.read(self.stdout.fileno(), 1024)
                    if data == "":
                        self.stdout.close()
                        read_set.remove(self.stdout)
                    stdout.append(data)
                if self.stderr in rlist:
                    data = os.read(self.stderr.fileno(), 1024)
                    if data == "":
                        self.stderr.close()
                        read_set.remove(self.stderr)
                    stderr.append(data)
            # All data exchanged.  Translate lists into strings.
            if stdout != None:
                stdout = ''.join(stdout)
            if stderr != None:
                stderr = ''.join(stderr)
            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(open, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)
            self.wait()
            return (stdout, stderr)
def _demo_posix():
    # Manual smoke test for the POSIX code path (run via __main__ only).
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist
    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()
    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist. I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        print >>sys.stderr, "Gosh. No error."
def _demo_windows():
    # Manual smoke test for the Windows code path (run via __main__ only).
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()
# Manual smoke test: run the platform-appropriate demo when executed directly.
if __name__ == "__main__":
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
| gpl-2.0 |
ProkopHapala/SimpleSimulationEngine | python/pyMeta/metaprograming.py | 1 | 6665 | #!/usr/bin/python
import os
import datetime
import re
# Prefix used in the generated python code for the loaded ctypes library
# object, e.g. "lib.funcName(...)".
lib_pre = 'lib'
# Regex intended for brace matching; only referenced by the disabled
# tokenizer code in the triple-quoted block below.
re_curly = re.compile( '(?:|{})', re.DOTALL)
#re_currly = re.compile( '(?:|{})', re.DOTALL)
# see https://docs.scipy.org/doc/numpy/user/basics.types.html
# Map from C pointer element type -> (numpy dtype name, ndpointer alias name)
# used when a pointer argument is turned into an np.ctypeslib.ndpointer.
# BUGFIX: the fixed-width integer keys were misspelled "unit..._t" (instead
# of "uint..._t"), so they could never match a real C declaration.
array_types = {
    "float"    : ( "float32" , "array1f"   ),
    "double"   : ( "float64" , "array1d"   ),
    "int"      : ( "int32"   , "array1i"   ),
    "bool"     : ( "int"     , "array1b"   ),
    "uint8_t"  : ( "uint8"   , "array1u8"  ),
    "uint16_t" : ( "uint16"  , "array1u16" ),
    "uint32_t" : ( "uint32"  , "array1u32" ),
    "uint64_t" : ( "uint64"  , "array1u64" ),
    "char"     : ( None      , "c_char_p"  ),
}
# see : https://docs.python.org/2/library/ctypes.html
# Map from C scalar type name -> ctypes type name, used for non-pointer
# arguments and return values (the commented entries are candidates that
# have not been needed yet).
prim_types = {
    "float"    : "c_float",
    "double"   : "c_double",
    "bool"     : "c_bool",
    "char"     : "c_char",
    "int"      : "c_int",
    "short"    : "c_short",
    "long"     : "c_ulong",
    "uint8_t"  : "c_ubyte",
    "uint16_t" : "c_ushort",
    "uint32_t" : "c_uint",
    "uint64_t" : "c_ulong",
    #"wchar_t" : "c_wchar"
    #"unsigned char" : "c_ubyte"
    #"short" : "c_short"
    #"unsigned short" : "c_ushort"
    #"unsigned int" : "c_uint"
    #"long" : "c_long"
    #"unsigned long" : "c_ulong"
    #"__int64" : "c_longlong"
    #"long long" : "c_longlong"
    #"unsigned __int64" : "c_ulonglong"
    #"unsigned long long" : "c_ulonglong"
    #"long double" : "c_longdouble"
}
# Array aliases already present in the target .py file (collected by
# check_existing_tokens) vs. aliases newly required by the current run
# (collected by ctype2py, emitted by write_arraytypes).
old_array_types = set()
new_array_types = set()
'''
# http://stackoverflow.com/questions/29974929/match-open-and-close-brackets-in-garbled-string
def tokenize(source):
start = 0
end = len(source)
while start < end:
match = pattern.match(source, start)
if match:
yield match.group(0)
else:
raise ValueError('Invalid syntax at character %d' % start)
start = match.end()
def findCloseBrace( lines, iline0, ichr0, q='{', p='}' ):
brlevel = 1
iline = iline0
ichr = ichr0
while brlevel>0:
print lines[iline],
tokens = re_curly.match( lines[iline] ).group(0)
print tokens
iline+=1
if(iline>iline0+100):
break
'''
def parse_cpp_header( iline0, ichr, lines ):
    """Collect the comma-separated argument declarations of a C/C++ function
    header whose name appears in lines[iline0] at column ichr.

    Returns (args, iline, ibr): the raw argument strings (not individually
    stripped), the index of the line holding the closing parenthesis, and
    the column of that parenthesis within the (sliced) line.
    """
    args  = []
    iline = iline0
    iend  = iline0+100                           # safety cap: header must close within 100 lines
    ichr  = lines[iline].find('(',ichr)+1        # start scanning just past the opening '('
    while(iline<iend):
        line = lines[iline][ichr:]; ichr=0
        ibr  = line.find(')')
        if(ibr>-1):
            line = line[:ibr]
            iend = 0                             # closing ')' found -> leave loop after this pass
        else:
            iline += 1
        print( "line %i: >>%s<<" %(iline,line) ) # debug trace (py2/py3-portable print call)
        args += line.strip().split(',')
    args = [x for x in args if x!=""]            # drop empties from trailing commas
    print( "args : %s" % args )                  # debug trace
    return args,iline,ibr
def parse_cpp_func( iline0, ichr, fun_name, lines ):
    """Parse one function declaration starting at lines[iline0][ichr].

    Returns (last_line_index, func_obj) where func_obj is the list
    [name, raw_args, return_type, source_lines].
    """
    # The token immediately before the function name is its return type.
    return_type = lines[iline0][:ichr].split()[-1]
    args, iline, ichr = parse_cpp_header( iline0, ichr+len(fun_name), lines )
    return iline, [fun_name, args, return_type, lines[iline0:iline+1]]
def ctype2py( cstr ):
    """Translate one C argument declaration into (name, c_decl, py_type).

    Pointer declarations are mapped through ``array_types`` (recording any
    newly needed numpy ndpointer alias into ``new_array_types``); plain
    scalars are mapped through ``prim_types``.  Returns None when the type
    is not recognized.
    """
    global new_array_types
    ichr = cstr.find('*')
    if(ichr>-1):
        # Pointer argument -> numpy array interface.
        # BUGFIX: .iteritems() is Python-2-only; .items() works on both.
        for key,val in array_types.items():
            if key in cstr:
                ichr+=1
                if val not in old_array_types:
                    new_array_types.add(val)
                return (cstr[ichr:].strip(),cstr[:ichr].strip(),val[1])
    else:
        for key,val in prim_types.items():
            ichr = cstr.find(key)
            if ichr>-1:
                ichr+=len(key)
                return (cstr[ichr:].strip(),cstr[:ichr].strip(),val)
    print( "ERROR ctype2py: %s" % cstr )
    return None
def write_python_interface(func_obj, wraper=True, cppcomment=True ):
    """Render the ctypes binding text (argtypes/restype assignments and,
    optionally, a thin python wrapper function) for one parsed C function.

    func_obj : [name, raw_args, return_type, source_lines] as produced by
               parse_cpp_func.
    """
    funname = func_obj[0]
    args    = [ ctype2py( arg ) for arg in func_obj[1] ]
    ret     = prim_types.get(func_obj[2])
    print( "%s %s" % (funname, args) )
    s = ""
    if cppcomment:
        # Keep the original C declaration as a comment above the binding.
        # (renamed loop variable: the original shadowed the accumulator `s`)
        s += "".join( [ "#"+line for line in func_obj[3] ] )
    s += "%s.%s.argtypes = [%s]\n" %(lib_pre,funname,",".join([arg[2] for arg in args]) )
    # BUGFIX: ctypes restype is a single type (or None), never a list;
    # the original emitted "restype  = [c_double]" which is invalid usage.
    s += "%s.%s.restype  = %s\n" %(lib_pre,funname,ret)
    if( wraper ):
        argnames = ",".join([arg[0] for arg in args])
        retstr = "return" if ret is not None else ""
        s +=(("def %s( %s ):\n" %(funname,argnames))+
             ("    %s %s.%s( %s )" %(retstr,lib_pre,funname,argnames))
            )
    return s
def write_arraytypes( ):
    """Emit one np.ctypeslib.ndpointer alias definition per newly used array type."""
    lines = [ "%s = np.ctypeslib.ndpointer(dtype=np.%s, ndim=1, flags='CONTIGUOUS')\n" %(typ[1],typ[0])
              for typ in new_array_types ]
    return "".join( lines )
def check_existing_tokens( func_names, py_name ):
    """Scan an already generated python file; drop from ``func_names`` every
    function whose binding is already present, and record into the global
    ``old_array_types`` which ndpointer aliases the file already defines.
    """
    global old_array_types
    with open(py_name,'r') as py_file:
        for line in py_file:
            # BUGFIX: the original tried func_names.remove("lib.X.argtypes"),
            # but func_names holds plain names -> ValueError on first match.
            # Remove the plain name instead (iterate over a copy while mutating).
            for fun_name in list(func_names):
                if ("%s.%s.argtypes" %(lib_pre,fun_name)) in line:
                    func_names.remove(fun_name)
            if( "np.ctypeslib.ndpointer" in line ):
                for typ in array_types.values():
                    if (typ[1] in line):
                        old_array_types.add(typ)
                        break
def generate_interface( cpp_name, py_name, func_names ):
    """Generate (or append to) the python file ``py_name`` the ctypes
    bindings for the functions of ``cpp_name`` listed in ``func_names``.

    Bindings already present in an existing ``py_name`` are skipped.
    ``func_names`` is consumed in place (matched names are removed).
    """
    if( os.path.isfile(py_name) ):
        check_existing_tokens( func_names, py_name )
        print( "old_array_types %s" % old_array_types )
        print( "func_names %s" % func_names )
        py_file = open(py_name, 'a')
        py_file.write("#========= auto update : %s\n" %str(datetime.datetime.now()) )
    else:
        py_file = open(py_name, 'w')
    try:
        with open(cpp_name) as f:
            cpp_lines = f.readlines()
        iline = 0
        nc = len(cpp_lines)
        new_funcs_interfaces = []
        while (iline < nc):
            cline = cpp_lines[iline]
            for fun_name in func_names:
                ichr = cline.find(fun_name)
                if ichr > -1:
                    iline,func_obj = parse_cpp_func( iline, ichr, fun_name, cpp_lines )
                    new_funcs_interfaces.append( write_python_interface(func_obj) )
                    func_names.remove(fun_name)
                    break  # restart matching on the next line
            iline+=1
        print( "old_array_types %s" % old_array_types )
        print( "new_array_types %s" % new_array_types )
        py_file.write( write_arraytypes( ) + "\n" )
        for func in new_funcs_interfaces:
            py_file.write( func + "\n" )
        py_file.write("\n")
    finally:
        # BUGFIX: the original said ``py_file.close`` without parentheses,
        # so the method was never called and the file was never closed.
        py_file.close()
| mit |
taknevski/tensorflow-xsmm | tensorflow/contrib/graph_editor/select.py | 75 | 28656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various ways of selecting operations and tensors in a graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from six import iteritems
from six import string_types
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
# Public API of this module.
__all__ = [
    "can_be_regex",
    "make_regex",
    "filter_ts",
    "filter_ts_from_regex",
    "filter_ops",
    "filter_ops_from_regex",
    "get_name_scope_ops",
    "check_cios",
    "get_ops_ios",
    "compute_boundary_ts",
    "get_within_boundary_ops",
    "get_forward_walk_ops",
    "get_backward_walk_ops",
    "get_walks_intersection_ops",
    "get_walks_union_ops",
    "select_ops",
    "select_ts",
    "select_ops_and_ts",
]
# Type of a compiled regular expression (the `re` module does not expose it
# directly; used for isinstance checks in can_be_regex).
_RE_TYPE = type(re.compile(""))
def can_be_regex(obj):
  """Return True if obj can be turned into a regular expression.

  Strings and already-compiled patterns both qualify.
  """
  regexable_types = string_types + (_RE_TYPE,)
  return isinstance(obj, regexable_types)
def make_regex(obj):
  """Coerce `obj` into a compiled regular expression.

  Args:
    obj: a string or an already-compiled regular expression.
  Returns:
    A compiled regular expression.
  Raises:
    ValueError: if obj could not be converted to a regular expression.
  """
  if not can_be_regex(obj):
    raise ValueError("Expected a string or a regex, got: {}".format(type(obj)))
  # Strings get compiled; compiled patterns pass through unchanged.
  return re.compile(obj) if isinstance(obj, string_types) else obj
def _get_input_ts(ops):
  """Compute the list of unique input tensors of all the op in ops.

  Order of first appearance is preserved.

  Args:
    ops: an object convertible to a list of `tf.Operation`.
  Returns:
    The list of unique input tensors of all the op in ops.
  Raises:
    TypeError: if ops cannot be converted to a list of `tf.Operation`.
  """
  ops = util.make_list_of_op(ops)
  seen = set()
  ordered = []
  for op in ops:
    for tensor in op.inputs:
      if tensor not in seen:
        seen.add(tensor)
        ordered.append(tensor)
  return ordered
def _get_output_ts(ops):
  """Compute the list of output tensors of all the op in ops.

  (An op's outputs are distinct, so no explicit de-duplication is needed.)

  Args:
    ops: an object convertible to a list of tf.Operation.
  Returns:
    The list of output tensors of all the op in ops.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  ts = []
  for op in ops:
    ts.extend(op.outputs)
  return ts
def filter_ts(ops, positive_filter):
  """Get all the tensors which are input or output of an op in ops.

  Args:
    ops: an object convertible to a list of `tf.Operation`.
    positive_filter: a function deciding whether to keep a tensor or not.
      If `True`, all the tensors are returned.
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if ops cannot be converted to a list of `tf.Operation`.
  """
  ops = util.make_list_of_op(ops)
  # Inputs first, then any outputs not already present.
  ts = _get_input_ts(ops)
  util.concatenate_unique(ts, _get_output_ts(ops))
  if positive_filter is True:
    return ts
  return [t for t in ts if positive_filter(t)]
def filter_ts_from_regex(ops, regex):
  r"""Get all the tensors linked to ops whose name matches the given regex.

  Args:
    ops: an object convertible to a list of tf.Operation.
    regex: a regular expression matching the tensors' name.
      For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo"
      scope.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  compiled = make_regex(regex)
  return filter_ts(ops, positive_filter=lambda t: compiled.search(t.name))
def filter_ops(ops, positive_filter):
  """Get the ops passing the given filter.

  Args:
    ops: an object convertible to a list of tf.Operation.
    positive_filter: a function deciding where to keep an operation or not.
      If True, all the operations are returned.
  Returns:
    A list of selected tf.Operation.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  # `is True` on purpose: True is a sentinel meaning "keep everything".
  if positive_filter is True:  # pylint: disable=g-explicit-bool-comparison
    return ops
  return [op for op in ops if positive_filter(op)]
def filter_ops_from_regex(ops, regex):
  """Get all the operations whose name matches the given regex.

  Args:
    ops: an object convertible to a list of `tf.Operation`.
    regex: a regular expression matching the operation's name.
      For example, `"^foo(/.*)?$"` will match all the operations in the "foo"
      scope.
  Returns:
    A list of `tf.Operation`.
  Raises:
    TypeError: if ops cannot be converted to a list of `tf.Operation`.
  """
  ops = util.make_list_of_op(ops)
  compiled = make_regex(regex)
  return filter_ops(ops, lambda op: compiled.search(op.name))
def get_name_scope_ops(ops, scope):
  """Get all the operations under the given scope path.

  Args:
    ops: an object convertible to a list of tf.Operation.
    scope: a scope path (a trailing "/" is ignored).
  Returns:
    A list of tf.Operation.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  if scope and scope.endswith("/"):
    scope = scope[:-1]
  return filter_ops_from_regex(ops, "^{}(/.*)?$".format(scope))
def check_cios(control_inputs=False, control_outputs=None, control_ios=None):
  """Validate and normalize the control_inputs/control_outputs arguments.

  Args:
    control_inputs: A boolean indicating whether control inputs are enabled.
    control_outputs: An instance of util.ControlOutputs or None. If not None,
      control outputs are enabled.
    control_ios: An instance of util.ControlOutputs or None. If not None, both
      control inputs and control outputs are enabled. This is equivalent to
      setting control_inputs to True and control_outputs to the
      util.ControlOutputs instance.
  Returns:
    A tuple `(control_inputs, control_outputs)` where:
      `control_inputs` is a boolean indicating whether to use control inputs.
      `control_outputs` is an instance of util.ControlOutputs or None
  Raises:
    ValueError: if control_inputs is an instance of util.ControlOutputs but
      control_outputs is not None
    TypeError: if control_outputs is not None and is not a util.ControlOutputs.
  """
  if control_ios is not None:
    # control_ios implies both flavors; it must not be combined with an
    # explicit control_outputs.
    if not isinstance(control_ios, util.ControlOutputs):
      raise TypeError("Expected a util.ControlOutputs, got: {}".format(
          type(control_ios)))
    if control_outputs is not None:
      raise ValueError("control_outputs should be None when using control_ios.")
    control_inputs, control_outputs = True, control_ios
  elif control_outputs is not None:
    if not isinstance(control_outputs, util.ControlOutputs):
      raise TypeError("Expected a util.ControlOutputs, got: {}".format(
          type(control_outputs)))
  # Refresh the control-output map before it is consumed.
  if control_outputs is not None:
    control_outputs.update()
  return control_inputs, control_outputs
def get_ops_ios(ops, control_inputs=False, control_outputs=None,
                control_ios=None):
  """Return all the `tf.Operation` which are connected to an op in ops.

  Args:
    ops: an object convertible to a list of `tf.Operation`.
    control_inputs: A boolean indicating whether control inputs are enabled.
    control_outputs: An instance of `util.ControlOutputs` or `None`. If not
      `None`, control outputs are enabled.
    control_ios: An instance of `util.ControlOutputs` or `None`. If not `None`,
      both control inputs and control outputs are enabled. This is equivalent
      to setting `control_inputs` to `True` and `control_outputs` to the
      `util.ControlOutputs` instance.
  Returns:
    All the `tf.Operation` surrounding the given ops.
  Raises:
    TypeError: if `ops` cannot be converted to a list of `tf.Operation`.
  """
  control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
                                               control_ios)
  ops = util.make_list_of_op(ops)
  neighbors = []
  for op in ops:
    # Producers of this op's inputs, then consumers of its outputs.
    util.concatenate_unique(neighbors, [tensor.op for tensor in op.inputs])
    for tensor in op.outputs:
      util.concatenate_unique(neighbors, tensor.consumers())
    if control_outputs is not None:
      util.concatenate_unique(neighbors, control_outputs.get(op))
    if control_inputs:
      util.concatenate_unique(neighbors, op.control_inputs)
  return neighbors
def compute_boundary_ts(ops):
  """Compute the tensors at the boundary of a set of ops.

  Every tensor connected to the given ops (in or out) falls into one of
  three categories:
  1) input tensors: their generating operation is not in `ops`;
  2) output tensors: at least one consumer operation is not in `ops`;
  3) inside tensors: neither input nor output tensors.
  A tensor consumed both inside and outside of `ops` is simultaneously an
  inside tensor and an output tensor.

  Args:
    ops: an object convertible to a list of tf.Operation.
  Returns:
    A tuple `(outside_input_ts, outside_output_ts, inside_ts)` of Python
    lists. Since a tensor can be both an inside tensor and an output tensor,
    `outside_output_ts` and `inside_ts` might intersect.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  input_ts = _get_input_ts(ops)
  output_ts = _get_output_ts(ops)
  output_ts_set = frozenset(output_ts)
  ops_set = frozenset(ops)

  # A tensor is "inside" when it is both an input and an output of `ops`;
  # it is "only inside" when, in addition, every consumer belongs to `ops`.
  inside_ts = []
  only_inside_ts = []
  for tensor in input_ts:
    if tensor in output_ts_set:
      inside_ts.append(tensor)
      if frozenset(tensor.consumers()) <= ops_set:
        only_inside_ts.append(tensor)

  inside_ts_set = frozenset(inside_ts)
  only_inside_ts_set = frozenset(only_inside_ts)
  outside_output_ts = [t for t in output_ts if t not in only_inside_ts_set]
  outside_input_ts = [t for t in input_ts if t not in inside_ts_set]
  return outside_input_ts, outside_output_ts, inside_ts
def get_within_boundary_ops(ops,
                            seed_ops,
                            boundary_ops=(),
                            inclusive=True,
                            control_inputs=False,
                            control_outputs=None,
                            control_ios=None):
  """Return all the `tf.Operation` within the given boundary.

  Args:
    ops: an object convertible to a list of `tf.Operation`. Those ops define
      the set in which to perform the operation (a `tf.Graph` is converted to
      the list of all its operations).
    seed_ops: the operations from which to start expanding.
    boundary_ops: the ops forming the boundary.
    inclusive: if `True`, the result will also include the boundary ops.
    control_inputs: A boolean indicating whether control inputs are enabled.
    control_outputs: An instance of `util.ControlOutputs` or `None`. If not
      `None`, control outputs are enabled.
    control_ios: An instance of `util.ControlOutputs` or `None`. If not
      `None`, both control inputs and control outputs are enabled.
  Returns:
    All the `tf.Operation` surrounding the given ops.
  Raises:
    TypeError: if `ops` or `seed_ops` cannot be converted to a list of
      `tf.Operation`.
    ValueError: if the boundary is intersecting with the seeds.
  """
  control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
                                               control_ios)
  ops = util.make_list_of_op(ops)
  seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
  boundary_ops = set(util.make_list_of_op(boundary_ops))
  reached = set(seed_ops)
  if boundary_ops & reached:
    raise ValueError("Boundary is intersecting with the seeds.")
  # Breadth-first expansion; boundary ops are never expanded further.
  wave = set(seed_ops)
  while wave:
    next_wave = set()
    for candidate in get_ops_ios(wave, control_inputs, control_outputs):
      if candidate in reached:
        continue
      if candidate in boundary_ops:
        if inclusive:
          reached.add(candidate)
      else:
        next_wave.add(candidate)
    reached.update(next_wave)
    wave = next_wave
  # Preserve the ordering of `ops` in the result.
  return [op for op in ops if op in reached]
def get_forward_walk_ops(seed_ops,
                         inclusive=True,
                         within_ops=None,
                         stop_at_ts=(),
                         control_outputs=None):
  """Do a forward graph walk and return all the visited ops.

  Args:
    seed_ops: an iterable of operations from which the forward graph
      walk starts. If a list of tensors is given instead, the seed_ops are
      set to be the consumers of those tensors.
    inclusive: if True the given seed_ops are also part of the resulting set.
    within_ops: an iterable of `tf.Operation` within which the search is
      restricted. If `within_ops` is `None`, the search is performed within
      the whole graph.
    stop_at_ts: an iterable of tensors at which the graph walk stops.
    control_outputs: a `util.ControlOutputs` instance or None.
      If not `None`, it will be used while walking the graph forward.
  Returns:
    A Python set of all the `tf.Operation` ahead of `seed_ops`.
  Raises:
    TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
      `tf.Operation`.
  """
  _, control_outputs = check_cios(False, control_outputs)
  if not util.is_iterable(seed_ops):
    seed_ops = [seed_ops]
  if not seed_ops:
    return []
  # Tensor seeds mean "start from the consumers of those tensors".
  if isinstance(seed_ops[0], tf_ops.Tensor):
    seed_ops = util.get_consuming_ops(
        util.make_list_of_t(seed_ops, allow_graph=False))
  else:
    seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)

  seed_ops = frozenset(seed_ops)
  stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
  if within_ops:
    within_ops = frozenset(util.make_list_of_op(within_ops, allow_graph=False))
    seed_ops &= within_ops

  def is_within(op):
    return within_ops is None or op in within_ops

  result = list(seed_ops)
  wave = set(seed_ops)
  # Breadth-first walk along (data and optional control) output edges.
  while wave:
    next_wave = set()
    for op in wave:
      for out_tensor in op.outputs:
        if out_tensor in stop_at_ts:
          continue
        for consumer in out_tensor.consumers():
          if consumer not in result and is_within(consumer):
            next_wave.add(consumer)
      if control_outputs is not None:
        for ctrl_op in control_outputs.get(op):
          if ctrl_op not in result and is_within(ctrl_op):
            next_wave.add(ctrl_op)
    util.concatenate_unique(result, next_wave)
    wave = next_wave
  if not inclusive:
    result = [op for op in result if op not in seed_ops]
  return result
def get_backward_walk_ops(seed_ops,
                          inclusive=True,
                          within_ops=None,
                          stop_at_ts=(),
                          control_inputs=False):
  """Do a backward graph walk and return all the visited ops.

  Args:
    seed_ops: an iterable of operations from which the backward graph
      walk starts. If a list of tensors is given instead, the seed_ops are
      set to be the generators of those tensors.
    inclusive: if True the given seed_ops are also part of the resulting set.
    within_ops: an iterable of `tf.Operation` within which the search is
      restricted. If `within_ops` is `None`, the search is performed within
      the whole graph.
    stop_at_ts: an iterable of tensors at which the graph walk stops.
    control_inputs: if True, control inputs will be used while moving
      backward.
  Returns:
    A Python set of all the `tf.Operation` behind `seed_ops`.
  Raises:
    TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
      `tf.Operation`.
  """
  if not util.is_iterable(seed_ops):
    seed_ops = [seed_ops]
  if not seed_ops:
    return []
  # Tensor seeds mean "start from the generators of those tensors".
  if isinstance(seed_ops[0], tf_ops.Tensor):
    seed_ops = util.get_generating_ops(
        util.make_list_of_t(seed_ops, allow_graph=False))
  else:
    seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)

  stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
  seed_ops = frozenset(util.make_list_of_op(seed_ops))
  if within_ops:
    within_ops = frozenset(util.make_list_of_op(within_ops, allow_graph=False))
    seed_ops &= within_ops

  def is_within(op):
    return within_ops is None or op in within_ops

  result = list(seed_ops)
  wave = set(seed_ops)
  # Breadth-first walk along (data and optional control) input edges.
  while wave:
    next_wave = set()
    for op in wave:
      for in_tensor in op.inputs:
        if in_tensor in stop_at_ts:
          continue
        producer = in_tensor.op
        if producer not in result and is_within(producer):
          next_wave.add(producer)
      if control_inputs:
        for ctrl_op in op.control_inputs:
          if ctrl_op not in result and is_within(ctrl_op):
            next_wave.add(ctrl_op)
    util.concatenate_unique(result, next_wave)
    wave = next_wave
  if not inclusive:
    result = [op for op in result if op not in seed_ops]
  return result
def get_walks_intersection_ops(forward_seed_ops,
                               backward_seed_ops,
                               forward_inclusive=True,
                               backward_inclusive=True,
                               within_ops=None,
                               control_inputs=False,
                               control_outputs=None,
                               control_ios=None):
  """Return the intersection of a forward and a backward walk.

  Args:
    forward_seed_ops: seeds of the forward walk. If a list of tensors is
      given instead, the seed ops are set to be the consumers of those
      tensors.
    backward_seed_ops: seeds of the backward walk. If a list of tensors is
      given instead, the seed ops are set to be the generators of those
      tensors.
    forward_inclusive: if True the given forward_seed_ops are also part of
      the resulting set.
    backward_inclusive: if True the given backward_seed_ops are also part of
      the resulting set.
    within_ops: an iterable of tf.Operation within which the search is
      restricted. If within_ops is None, the search is performed within
      the whole graph.
    control_inputs: A boolean indicating whether control inputs are enabled.
    control_outputs: An instance of util.ControlOutputs or None. If not None,
      control outputs are enabled.
    control_ios: An instance of util.ControlOutputs or None. If not None,
      both control inputs and control outputs are enabled.
  Returns:
    A Python set of all the tf.Operation in the intersection of a forward and
    a backward walk.
  Raises:
    TypeError: if `forward_seed_ops` or `backward_seed_ops` or `within_ops`
      cannot be converted to a list of `tf.Operation`.
  """
  control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
                                               control_ios)
  forward_ops = get_forward_walk_ops(forward_seed_ops,
                                     inclusive=forward_inclusive,
                                     within_ops=within_ops,
                                     control_outputs=control_outputs)
  backward_ops = get_backward_walk_ops(backward_seed_ops,
                                       inclusive=backward_inclusive,
                                       within_ops=within_ops,
                                       control_inputs=control_inputs)
  # Keep the forward-walk ordering; set membership is only an accelerator.
  backward_set = set(backward_ops)
  return [op for op in forward_ops if op in backward_set]
def get_walks_union_ops(forward_seed_ops,
                        backward_seed_ops,
                        forward_inclusive=True,
                        backward_inclusive=True,
                        within_ops=None,
                        control_inputs=False,
                        control_outputs=None,
                        control_ios=None):
  """Return the union of a forward and a backward walk.

  Args:
    forward_seed_ops: seeds of the forward walk. If a list of tensors is
      given instead, the seed ops are set to be the consumers of those
      tensors.
    backward_seed_ops: seeds of the backward walk. If a list of tensors is
      given instead, the seed ops are set to be the generators of those
      tensors.
    forward_inclusive: if True the given forward_seed_ops are also part of
      the resulting set.
    backward_inclusive: if True the given backward_seed_ops are also part of
      the resulting set.
    within_ops: restrict the search within those operations. If within_ops
      is None, the search is done within the whole graph.
    control_inputs: A boolean indicating whether control inputs are enabled.
    control_outputs: An instance of util.ControlOutputs or None. If not None,
      control outputs are enabled.
    control_ios: An instance of util.ControlOutputs or None. If not None,
      both control inputs and control outputs are enabled.
  Returns:
    A Python set of all the tf.Operation in the union of a forward and a
    backward walk.
  Raises:
    TypeError: if forward_seed_ops or backward_seed_ops or within_ops cannot
      be converted to a list of tf.Operation.
  """
  control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
                                               control_ios)
  forward_ops = get_forward_walk_ops(forward_seed_ops,
                                     inclusive=forward_inclusive,
                                     within_ops=within_ops,
                                     control_outputs=control_outputs)
  backward_ops = get_backward_walk_ops(backward_seed_ops,
                                       inclusive=backward_inclusive,
                                       within_ops=within_ops,
                                       control_inputs=control_inputs)
  # concatenate_unique appends the new backward ops into forward_ops and
  # returns it.
  return util.concatenate_unique(forward_ops, backward_ops)
def select_ops(*args, **kwargs):
  """Helper to select operations.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation`. `tf.Tensor` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
      'restrict_ops_regex': a regular expression is ignored if it doesn't
      start with the substring "(?#ops)".
  Returns:
    A list of `tf.Operation`.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Operation`
      or an (array of) `tf.Tensor` (silently ignored) or a string
      or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a regular
      expression is used without passing a graph as a keyword argument.
  """
  # Parse the keyword arguments.
  graph = None
  positive_filter = None
  restrict_ops_regex = False
  for key, value in iteritems(kwargs):
    if key == "graph":
      graph = value
      if graph is not None and not isinstance(graph, tf_ops.Graph):
        raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
    elif key == "positive_filter":
      positive_filter = value
    elif key == "restrict_ops_regex":
      restrict_ops_regex = value
    elif key == "restrict_ts_regex":
      pass  # Consumed by select_ts; silently ignored here.
    else:
      raise ValueError("Wrong keywords argument: {}.".format(key))

  selected = []
  for arg in args:
    if can_be_regex(arg):
      if graph is None:
        raise ValueError("Use the keyword argument 'graph' to use regex.")
      regex = make_regex(arg)
      # "(?#ts)" marks a tensor-only regex; skip it for op selection.
      if regex.pattern.startswith("(?#ts)"):
        continue
      if restrict_ops_regex and not regex.pattern.startswith("(?#ops)"):
        continue
      for candidate in filter_ops_from_regex(graph, regex):
        if candidate not in selected:
          if positive_filter is None or positive_filter(candidate):
            selected.append(candidate)
    else:
      extra_ops = util.make_list_of_op(arg, ignore_ts=True)
      if positive_filter is not None:
        extra_ops = [op for op in extra_ops if positive_filter(op)]
      extra_ops = [op for op in extra_ops if op not in selected]
      selected += extra_ops
  return selected
def select_ts(*args, **kwargs):
  """Helper to select tensors.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Tensor`. `tf.Operation` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
      'restrict_ts_regex': a regular expression is ignored if it doesn't
      start with the substring "(?#ts)".
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Tensor`
      or an (array of) `tf.Operation` (silently ignored) or a string
      or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a regular
      expression is used without passing a graph as a keyword argument.
  """
  # Parse the keyword arguments.
  graph = None
  positive_filter = None
  restrict_ts_regex = False
  for key, value in iteritems(kwargs):
    if key == "graph":
      graph = value
      if graph is not None and not isinstance(graph, tf_ops.Graph):
        raise TypeError("Expected a tf.Graph, got {}".format(type(graph)))
    elif key == "positive_filter":
      positive_filter = value
    elif key == "restrict_ts_regex":
      restrict_ts_regex = value
    elif key == "restrict_ops_regex":
      pass  # Consumed by select_ops; silently ignored here.
    else:
      raise ValueError("Wrong keywords argument: {}.".format(key))

  selected = []
  for arg in args:
    if can_be_regex(arg):
      if graph is None:
        raise ValueError("Use the keyword argument 'graph' to use regex.")
      regex = make_regex(arg)
      # "(?#ops)" marks an op-only regex; skip it for tensor selection.
      if regex.pattern.startswith("(?#ops)"):
        continue
      if restrict_ts_regex and not regex.pattern.startswith("(?#ts)"):
        continue
      for candidate in filter_ts_from_regex(graph, regex):
        if candidate not in selected:
          if positive_filter is None or positive_filter(candidate):
            selected.append(candidate)
    else:
      extra_ts = util.make_list_of_t(arg, ignore_ops=True)
      if positive_filter is not None:
        extra_ts = [t for t in extra_ts if positive_filter(t)]
      extra_ts = [t for t in extra_ts if t not in selected]
      selected += extra_ts
  return selected
def select_ops_and_ts(*args, **kwargs):
  """Helper to select operations and tensors.

  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation` 3) (array of) tf.Tensor. Regular expressions matching
      tensors must start with the comment `"(?#ts)"`, for instance:
      `"(?#ts)^foo/.*"`.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This
      is required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)`
      is `True`. This is optional.
  Returns:
    A tuple `(ops, ts)` where:
      `ops` is a list of `tf.Operation`, and
      `ts` is a list of `tf.Tensor`
  Raises:
    TypeError: if the optional keyword argument graph is not a `tf.Graph`
      or if an argument in args is not an (array of) `tf.Tensor`
      or an (array of) `tf.Operation` or a string or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a regular
      expression is used without passing a graph as a keyword argument.
  """
  # Ops accept any regex not marked "(?#ts)"; tensors require the marker.
  selected_ops = select_ops(*args, restrict_ops_regex=False, **kwargs)
  selected_ts = select_ts(*args, restrict_ts_regex=True, **kwargs)
  return selected_ops, selected_ts
| apache-2.0 |
insiight/ddpclient | ddpclient/user_list_client_selector.py | 1 | 1087 | from .selector import Selector
class UserListClientSelector(Selector):
    """Selector restricted to user-list client reports.

    Extends the generic ``Selector`` with a creation-date range filter
    expressed in the AdWords ``('DateRange', {...})`` tuple format.
    """

    def __init__(self):
        super(UserListClientSelector, self).__init__()

    def from_date_range(self, min_date=None, max_date=None):
        """Limit the selection to the inclusive range [min_date, max_date].

        Args:
            min_date: ``datetime.date`` lower bound; defaults to 1990-01-01.
            max_date: ``datetime.date`` upper bound; defaults to 2038-01-01.

        Returns:
            self, to allow method chaining.
        """
        # Bug fix: the module never imported `datetime`, so calling this
        # method with default bounds raised NameError. Import the stdlib
        # module locally to keep the fix self-contained.
        import datetime

        if min_date is None:
            min_date = datetime.date(1990, 1, 1)
        if max_date is None:
            max_date = datetime.date(2038, 1, 1)
        self.selector_data['dateRange'] = (
            'DateRange',
            {'min': ('Date', {'year': min_date.year,
                              'month': min_date.month,
                              'day': min_date.day}),
             'max': ('Date', {'year': max_date.year,
                              'month': max_date.month,
                              'day': max_date.day})})
        return self
| mit |
GanjaCoinProject/ganjacoin | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples, where each element is the list
    of quoted string fragments making up that entry.
    """
    messages = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new entry begins; flush the previous one if complete.
            if reading_str:
                messages.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line of a multi-line msgid/msgstr.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    # Flush the trailing entry, if any.
    if reading_str:
        messages.append((current_id, current_str))
    return messages
# Collect all C++ sources and headers to scan for translatable strings.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

# xgettext -n --keyword=_ $FILES
# Run xgettext over the sources and capture the generated PO catalog from
# stdout. NOTE(review): `out` is treated as str — assumes Python 2; under
# Python 3 it would be bytes. TODO confirm target interpreter.
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out)

# Emit the extracted strings as a C array wrapped in QT_TRANSLATE_NOOP so
# that Qt Linguist picks them up.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    # Skip the PO header entry, whose msgid is the empty string.
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/distutils/tests/test_install_headers.py | 141 | 1269 | """Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.test_support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             support.EnvironGuard,
                             unittest.TestCase):
    """Exercise the install_headers command end to end."""

    def test_simple_run(self):
        # Create two header files in a fresh temporary directory.
        src_dir = self.mkdtemp()
        headers = [os.path.join(src_dir, name)
                   for name in ('header1', 'header2')]
        for path in headers:
            self.write_file(path)

        pkg_dir, dist = self.create_dist(headers=headers)
        cmd = install_headers(dist)
        self.assertEqual(cmd.get_inputs(), headers)

        # Run the command against an installation directory.
        cmd.install_dir = os.path.join(pkg_dir, 'inst')
        cmd.ensure_finalized()
        cmd.run()

        # Both headers must have been installed.
        self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
    """Return a suite with all tests in this module (used by regrtest)."""
    return unittest.makeSuite(InstallHeadersTestCase)

if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
| lgpl-3.0 |
bdang2012/taiga-back | taiga/projects/services/stats.py | 15 | 13047 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from django.db.models import Q, Count
from django.apps import apps
import datetime
import copy
from taiga.projects.history.models import HistoryEntry
def _get_milestones_stats_for_backlog(project):
    """
    Get collection of stats for each milestone of project.
    Data returned by this function are used on backlog.

    Yields one dict per sprint (existing sprints first, then projected
    "Future sprint" entries up to project.total_milestones), followed by a
    final 'Project End' entry. Each dict carries the sprint name, the
    optimal (ideal burndown) points, the actual evolution, and the team /
    client scope increments.
    """
    current_evolution = 0
    current_team_increment = 0
    current_client_increment = 0

    # Ideal points burnt per sprint, assuming a linear burndown.
    optimal_points_per_sprint = 0
    if project.total_story_points and project.total_milestones:
        optimal_points_per_sprint = project.total_story_points / project.total_milestones

    future_team_increment = sum(project.future_team_increment.values())
    future_client_increment = sum(project.future_client_increment.values())

    milestones = project.milestones.order_by('estimated_start').\
        prefetch_related("user_stories",
                         "user_stories__role_points",
                         "user_stories__role_points__points")
    milestones = list(milestones)
    milestones_count = len(milestones)
    optimal_points = 0
    team_increment = 0
    client_increment = 0
    # Robustness fix: total_milestones may be None, which made max() raise.
    total_milestones = project.total_milestones or 0
    for current_milestone in range(0, max(milestones_count, total_milestones)):
        optimal_points = (project.total_story_points -
                          (optimal_points_per_sprint * current_milestone))
        # Evolution is unknown (None) once we are past the real sprints.
        evolution = (project.total_story_points - current_evolution
                     if current_evolution is not None else None)
        if current_milestone < milestones_count:
            ml = milestones[current_milestone]
            milestone_name = ml.name
            team_increment = current_team_increment
            client_increment = current_client_increment

            current_evolution += sum(ml.closed_points.values())
            current_team_increment += sum(ml.team_increment_points.values())
            current_client_increment += sum(ml.client_increment_points.values())
        else:
            milestone_name = _("Future sprint")
            # Bug fix: these two assignments previously ended with a stray
            # trailing comma, so the yielded increments were 1-tuples
            # instead of numbers.
            team_increment = current_team_increment + future_team_increment
            client_increment = current_client_increment + future_client_increment
            current_evolution = None

        yield {
            'name': milestone_name,
            'optimal': optimal_points,
            'evolution': evolution,
            'team-increment': team_increment,
            'client-increment': client_increment,
        }

    # Final synthetic entry marking the projected end of the project.
    optimal_points -= optimal_points_per_sprint
    evolution = (project.total_story_points - current_evolution
                 if current_evolution is not None and project.total_story_points else None)
    yield {
        'name': _('Project End'),
        'optimal': optimal_points,
        'evolution': evolution,
        'team-increment': team_increment,
        'client-increment': client_increment,
    }
def _count_status_object(status_obj, counting_storage):
if status_obj.id in counting_storage:
counting_storage[status_obj.id]['count'] += 1
else:
counting_storage[status_obj.id] = {}
counting_storage[status_obj.id]['count'] = 1
counting_storage[status_obj.id]['name'] = status_obj.name
counting_storage[status_obj.id]['id'] = status_obj.id
counting_storage[status_obj.id]['color'] = status_obj.color
def _count_owned_object(user_obj, counting_storage):
    """Accumulate one occurrence of user_obj into counting_storage.

    counting_storage maps user id -> {'count', 'username', 'name', 'id',
    'color'}. A falsy user_obj is counted under the synthetic id 0 as
    "Unassigned".
    """
    if user_obj:
        key = user_obj.id
        if key in counting_storage:
            counting_storage[key]['count'] += 1
        else:
            counting_storage[key] = {
                'count': 1,
                'username': user_obj.username,
                'name': user_obj.get_full_name(),
                'id': key,
                'color': user_obj.color,
            }
    else:
        if 0 in counting_storage:
            counting_storage[0]['count'] += 1
        else:
            counting_storage[0] = {
                'count': 1,
                'username': _('Unassigned'),
                'name': _('Unassigned'),
                'id': 0,
                'color': 'black',
            }
def get_stats_for_project_issues(project):
    """Build the aggregated issue statistics for a project.

    Returns a dict with global counters (total/opened/closed), per-attribute
    breakdowns (type, status, priority, severity, owner, assignee) and,
    under 'last_four_weeks_days', day-by-day series covering the last 28
    days (oldest day first).
    """
    project_issues_stats = {
        'total_issues': 0,
        'opened_issues': 0,
        'closed_issues': 0,
        'issues_per_type': {},
        'issues_per_status': {},
        'issues_per_priority': {},
        'issues_per_severity': {},
        'issues_per_owner': {},
        'issues_per_assigned_to': {},
        'last_four_weeks_days': {
            'by_open_closed': {'open': [], 'closed': []},
            'by_severity': {},
            'by_priority': {},
            # NOTE(review): 'by_status' is initialized but never filled below.
            'by_status': {},
        }
    }
    # select_related avoids one query per issue attribute in the loop below;
    # the queryset's result cache lets the per-day filter() passes re-iterate
    # it without re-querying.
    issues = project.issues.all().select_related(
        'status', 'priority', 'type', 'severity', 'owner', 'assigned_to'
    )
    # First pass: global counters and per-attribute breakdowns.
    for issue in issues:
        project_issues_stats['total_issues'] += 1
        if issue.status.is_closed:
            project_issues_stats['closed_issues'] += 1
        else:
            project_issues_stats['opened_issues'] += 1
        _count_status_object(issue.type, project_issues_stats['issues_per_type'])
        _count_status_object(issue.status, project_issues_stats['issues_per_status'])
        _count_status_object(issue.priority, project_issues_stats['issues_per_priority'])
        _count_status_object(issue.severity, project_issues_stats['issues_per_severity'])
        _count_owned_object(issue.owner, project_issues_stats['issues_per_owner'])
        _count_owned_object(issue.assigned_to, project_issues_stats['issues_per_assigned_to'])
    # Seed the per-day series with one entry per severity/priority seen above
    # (same metadata, 'count' replaced by an empty 'data' series).
    for severity in project_issues_stats['issues_per_severity'].values():
        project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']] = copy.copy(severity)
        del(project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']]['count'])
        project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']]['data'] = []
    for priority in project_issues_stats['issues_per_priority'].values():
        project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']] = copy.copy(priority)
        del(project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']]['count'])
        project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']]['data'] = []
    # Second pass: one data point per day for the last 28 days, from the
    # oldest (27 days ago) to today. Dates are compared naive (tzinfo
    # stripped) against local midnight.
    for x in range(27, -1, -1):
        day = datetime.datetime.combine(datetime.date.today(), datetime.time(0, 0)) - datetime.timedelta(days=x)
        next_day = day + datetime.timedelta(days=1)
        # Issues created during this day.
        open_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) >= day, issues)
        open_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) < next_day, open_this_day)
        open_this_day = len(list(open_this_day))
        project_issues_stats['last_four_weeks_days']['by_open_closed']['open'].append(open_this_day)
        # Issues finished during this day.
        closed_this_day = filter(lambda x: x.finished_date, issues)
        closed_this_day = filter(lambda x: x.finished_date.replace(tzinfo=None) >= day, closed_this_day)
        closed_this_day = filter(lambda x: x.finished_date.replace(tzinfo=None) < next_day, closed_this_day)
        closed_this_day = len(list(closed_this_day))
        project_issues_stats['last_four_weeks_days']['by_open_closed']['closed'].append(closed_this_day)
        # Issues still open at the end of this day (created before, not yet
        # finished), broken down by severity and priority.
        opened_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) < next_day, issues)
        opened_this_day = list(filter(lambda x: x.finished_date is None or x.finished_date.replace(tzinfo=None) > day, opened_this_day))
        for severity in project_issues_stats['last_four_weeks_days']['by_severity']:
            by_severity = filter(lambda x: x.severity_id == severity, opened_this_day)
            by_severity = len(list(by_severity))
            project_issues_stats['last_four_weeks_days']['by_severity'][severity]['data'].append(by_severity)
        for priority in project_issues_stats['last_four_weeks_days']['by_priority']:
            by_priority = filter(lambda x: x.priority_id == priority, opened_this_day)
            by_priority = len(list(by_priority))
            project_issues_stats['last_four_weeks_days']['by_priority'][priority]['data'].append(by_priority)
    return project_issues_stats
def get_stats_for_project(project):
    """Compute the project-level burndown summary (points, speed, sprints)."""
    # Re-fetch the project with the related objects needed below prefetched.
    project = (apps.get_model("projects", "Project").objects
               .prefetch_related("milestones", "user_stories")
               .get(id=project.id))

    points = project.calculated_points
    closed_points = sum(points["closed"].values())
    closed_milestones = project.milestones.filter(closed=True).count()
    # Speed = average closed points per finished sprint (0 if none closed).
    speed = closed_points / closed_milestones if closed_milestones != 0 else 0

    return {
        'name': project.name,
        'total_milestones': project.total_milestones,
        'total_points': project.total_story_points,
        'closed_points': closed_points,
        'closed_points_per_role': points["closed"],
        'defined_points': sum(points["defined"].values()),
        'defined_points_per_role': points["defined"],
        'assigned_points': sum(points["assigned"].values()),
        'assigned_points_per_role': points["assigned"],
        'milestones': _get_milestones_stats_for_backlog(project),
        'speed': speed,
    }
def _get_closed_bugs_per_member_stats(project):
    """Count closed issues per assignee (user id -> count)."""
    rows = (project.issues.filter(status__is_closed=True)
                          .values('assigned_to')
                          .annotate(count=Count('assigned_to'))
                          .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def _get_iocaine_tasks_per_member_stats(project):
    """Count iocaine tasks per assignee (user id -> count)."""
    rows = (project.tasks.filter(is_iocaine=True)
                         .values('assigned_to')
                         .annotate(count=Count('assigned_to'))
                         .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def _get_wiki_changes_per_member_stats(project):
    """Count wiki page edits per user (user pk -> count)."""
    wiki_changes = {}
    page_ids = project.wiki_pages.values_list("id", flat=True)
    wiki_page_keys = ["wiki.wikipage:%s" % id for id in page_ids]
    history_entries = HistoryEntry.objects.filter(key__in=wiki_page_keys).values('user')
    # NOTE(review): entry["user"] is assumed to be a dict carrying a "pk"
    # key — confirm against the HistoryEntry.user field definition.
    for entry in history_entries:
        user_pk = entry["user"]["pk"]
        wiki_changes[user_pk] = wiki_changes.get(user_pk, 0) + 1
    return wiki_changes
def _get_created_bugs_per_member_stats(project):
    """Count issues opened per owner (user id -> count)."""
    rows = (project.issues.values('owner')
                          .annotate(count=Count('owner'))
                          .order_by())
    return {row["owner"]: row["count"] for row in rows}
def _get_closed_tasks_per_member_stats(project):
    """Count closed tasks per assignee (user id -> count)."""
    rows = (project.tasks.filter(status__is_closed=True)
                         .values('assigned_to')
                         .annotate(count=Count('assigned_to'))
                         .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def get_member_stats_for_project(project):
    """Build the per-member activity counters for a project.

    Every member appears in every counter dict (defaulting to 0), merged
    with the actual counts computed by the per-metric helpers.
    """
    member_ids = project.members.values_list("id", flat=True)
    base_counters = {member_id: 0 for member_id in member_ids}

    def _merged(counter_fn):
        # Start from a zeroed copy so absent members still show up.
        counters = base_counters.copy()
        counters.update(counter_fn(project))
        return counters

    return {
        "closed_bugs": _merged(_get_closed_bugs_per_member_stats),
        "iocaine_tasks": _merged(_get_iocaine_tasks_per_member_stats),
        "wiki_changes": _merged(_get_wiki_changes_per_member_stats),
        "created_bugs": _merged(_get_created_bugs_per_member_stats),
        "closed_tasks": _merged(_get_closed_tasks_per_member_stats),
    }
| agpl-3.0 |
tqchen/tvm | python/tvm/rpc/__init__.py | 5 | 1388 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lightweight TVM RPC module.
RPC enables connect to a remote server, upload and launch functions.
This is useful to for cross-compile and remote testing,
The compiler stack runs on local server, while we use RPC server
to run on remote runtime which don't have a compiler available.
The test program compiles the program on local server,
upload and run remote RPC server, get the result back to verify correctness.
"""
from .server import Server
from .client import connect, connect_tracker
from .client import RPCSession, LocalSession, PopenSession, TrackerSession
from .minrpc import with_minrpc
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.unidecode/lib/unidecode/x0fa.py | 252 | 4406 | data = (
'Chey ', # 0x00
'Thak ', # 0x01
'Thak ', # 0x02
'Thang ', # 0x03
'Thayk ', # 0x04
'Thong ', # 0x05
'Pho ', # 0x06
'Phok ', # 0x07
'Hang ', # 0x08
'Hang ', # 0x09
'Hyen ', # 0x0a
'Hwak ', # 0x0b
'Wu ', # 0x0c
'Huo ', # 0x0d
'[?] ', # 0x0e
'[?] ', # 0x0f
'Zhong ', # 0x10
'[?] ', # 0x11
'Qing ', # 0x12
'[?] ', # 0x13
'[?] ', # 0x14
'Xi ', # 0x15
'Zhu ', # 0x16
'Yi ', # 0x17
'Li ', # 0x18
'Shen ', # 0x19
'Xiang ', # 0x1a
'Fu ', # 0x1b
'Jing ', # 0x1c
'Jing ', # 0x1d
'Yu ', # 0x1e
'[?] ', # 0x1f
'Hagi ', # 0x20
'[?] ', # 0x21
'Zhu ', # 0x22
'[?] ', # 0x23
'[?] ', # 0x24
'Yi ', # 0x25
'Du ', # 0x26
'[?] ', # 0x27
'[?] ', # 0x28
'[?] ', # 0x29
'Fan ', # 0x2a
'Si ', # 0x2b
'Guan ', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
chiamingyen/pygroup | wsgi/static/Brython2.2.0rc0-20140913-093500/Lib/tokenize.py | 728 | 24424 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """A single token: its type, text, (row, col) span and originating line."""

    def __repr__(self):
        described_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=described_type))

    @property
    def exact_type(self):
        """For OP tokens, the precise operator token type; otherwise ``type``."""
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
# Helpers used below to compose alternation regexes out of sub-patterns.
def group(*choices): return '(' + '|'.join(choices) + ')'  # (a|b|c)
def any(*choices): return group(*choices) + '*'  # NOTE: shadows builtins.any; kept -- the regex constants below call it
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
endpats = {"'": Single, '"': Double,
"'''": Single3, '"""': Double3,
"r'''": Single3, 'r"""': Double3,
"b'''": Single3, 'b"""': Double3,
"R'''": Single3, 'R"""': Double3,
"B'''": Single3, 'B"""': Double3,
"br'''": Single3, 'br"""': Double3,
"bR'''": Single3, 'bR"""': Double3,
"Br'''": Single3, 'Br"""': Double3,
"BR'''": Single3, 'BR"""': Double3,
"rb'''": Single3, 'rb"""': Double3,
"Rb'''": Single3, 'Rb"""': Double3,
"rB'''": Single3, 'rB"""': Double3,
"RB'''": Single3, 'RB"""': Double3,
"u'''": Single3, 'u"""': Double3,
"R'''": Single3, 'R"""': Double3,
"U'''": Single3, 'U"""': Double3,
'r': None, 'R': None, 'b': None, 'B': None,
'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',
"rb'''", 'rb"""', "rB'''", 'rB"""',
"Rb'''", 'Rb"""', "RB'''", 'RB"""',
"u'''", 'u"""', "U'''", 'U"""',
):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ,
"rb'", 'rb"', "rB'", 'rB"',
"Rb'", 'Rb"', "RB'", 'RB"' ,
"u'", 'u"', "U'", 'U"',
):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass  # raised on EOF inside an unterminated multi-line string/statement
class StopTokenizing(Exception): pass  # not raised in this module; retained for API compatibility
class Untokenizer:
    """Reconstructs source text from a token stream (see ``untokenize``)."""

    def __init__(self):
        self.tokens = []      # accumulated output fragments
        self.prev_row = 1     # row where the previous token ended
        self.prev_col = 0     # column where the previous token ended
        self.encoding = None  # set from an ENCODING token, if one is seen

    def add_whitespace(self, start):
        """Pad the output with spaces so the next token lands at *start*'s column."""
        row, col = start
        # NOTE(review): this asserts row <= prev_row, i.e. that we never advance
        # to a LATER row -- one would expect row >= prev_row here; confirm against
        # upstream CPython tokenize before relying on this invariant.
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Render full 5-tuples exactly; fall back to ``compat`` on 2-tuples."""
        for t in iterable:
            if len(t) == 2:
                # Degraded (type, string) input: hand the rest of the stream over.
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Best-effort reconstruction when only (type, string) pairs are given."""
        startline = False
        indents = []                     # stack of INDENT strings currently open
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '                # keep adjacent names/numbers from fusing
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # Re-emit the current indentation at the start of each line.
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    # Only encode when an ENCODING token was seen; otherwise a str is returned.
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name  # used only to enrich error messages
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat a StopIteration from the readline callable as EOF.
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)  # validates the charset name
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)
        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'  # BOM plus utf-8 cookie -> utf-8-sig
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]  # strip the 3-byte BOM
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on the second line as well.
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    # NOTE: shadows builtins.open within this module; raw access goes
    # through builtins.open below.
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)  # rewind so the caller also sees the BOM/cookie lines
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")  # yields lines until readline() returns b""
    empty = repeat(b"")           # then pad with b"" forever so _tokenize sees EOF
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
    """Implementation generator behind tokenize()/generate_tokens().

    Yields TokenInfo 5-tuples.  *encoding* is the byte encoding to decode
    lines with, or None for str input (then no ENCODING token is emitted).
    """
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0   # state for a string continued across lines
    contline = None
    indents = [0]               # stack of open indentation column widths
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''
        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string not closed and not continued: error token.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]
                if (initial in numchars or     # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL), else NEWLINE.
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)  # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():   # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                # No pseudo-token matched: emit the single character as an error.
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize source read as str lines; no ENCODING token is emitted."""
    return _tokenize(readline, None)
def main():
    """Command-line entry point: tokenize a file (or stdin) and print tokens."""
    import argparse
    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Print a compiler-style diagnostic and exit with status 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            # builtins.open: this module shadows open() with its own version.
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
| gpl-2.0 |
Jayflux/servo | tests/wpt/web-platform-tests/tools/third_party/py/testing/root/test_xmlgen.py | 55 | 4005 |
import py
from py._xmlgen import unicode, html, raw
import sys
class ns(py.xml.Namespace):
pass
def test_escape():
    """py.xml.escape must accept str, bytes, and objects with __unicode__/__str__."""
    uvalue = py.builtin._totext('\xc4\x85\xc4\x87\n\xe2\x82\xac\n', 'utf-8')
    class A:
        def __unicode__(self):
            return uvalue
        def __str__(self):
            x = self.__unicode__()
            if sys.version_info[0] < 3:
                return x.encode('utf-8')  # on py2 str() must return bytes
            return x
    y = py.xml.escape(uvalue)
    assert y == uvalue
    x = py.xml.escape(A())
    assert x == uvalue
    if sys.version_info[0] < 3:
        # On py2 the result must be unicode even for byte/object input.
        assert isinstance(x, unicode)
        assert isinstance(y, unicode)
    y = py.xml.escape(uvalue.encode('utf-8'))
    assert y == uvalue
def test_tag_with_text():
    """A tag with a single text child renders as <hello>world</hello>."""
    node = ns.hello("world")
    assert unicode(node) == "<hello>world</hello>"
def test_class_identity():
assert ns.hello is ns.hello
def test_tag_with_text_and_attributes():
    """Keyword arguments become attributes, reachable via .attr and rendered."""
    tag = ns.some(name="hello", value="world")
    assert tag.attr.name == 'hello'
    assert tag.attr.value == 'world'
    assert unicode(tag) == '<some name="hello" value="world"/>'
def test_tag_with_subclassed_attr_simple():
class my(ns.hello):
class Attr(ns.hello.Attr):
hello="world"
x = my()
assert x.attr.hello == 'world'
assert unicode(x) == '<my hello="world"/>'
def test_tag_with_raw_attr():
x = html.object(data=raw('&'))
assert unicode(x) == '<object data="&"></object>'
def test_tag_nested():
x = ns.hello(ns.world())
unicode(x) # triggers parentifying
assert x[0].parent is x
u = unicode(x)
assert u == '<hello><world/></hello>'
def test_list_nested():
x = ns.hello([ns.world()]) #pass in a list here
u = unicode(x)
assert u == '<hello><world/></hello>'
def test_tag_xmlname():
class my(ns.hello):
xmlname = 'world'
u = unicode(my())
assert u == '<world/>'
def test_tag_with_text_entity():
x = ns.hello('world & rest')
u = unicode(x)
assert u == "<hello>world & rest</hello>"
def test_tag_with_text_and_attributes_entity():
x = ns.some(name="hello & world")
assert x.attr.name == "hello & world"
u = unicode(x)
assert u == '<some name="hello & world"/>'
def test_raw():
x = ns.some(py.xml.raw("<p>literal</p>"))
u = unicode(x)
assert u == "<some><p>literal</p></some>"
def test_html_name_stickyness():
class my(html.p):
pass
x = my("hello")
assert unicode(x) == '<p>hello</p>'
def test_stylenames():
class my:
class body(html.body):
style = html.Style(font_size = "12pt")
u = unicode(my.body())
assert u == '<body style="font-size: 12pt"></body>'
def test_class_None():
t = html.body(class_=None)
u = unicode(t)
assert u == '<body></body>'
def test_alternating_style():
alternating = (
html.Style(background="white"),
html.Style(background="grey"),
)
class my(html):
class li(html.li):
def style(self):
i = self.parent.index(self)
return alternating[i%2]
style = property(style)
x = my.ul(
my.li("hello"),
my.li("world"),
my.li("42"))
u = unicode(x)
assert u == ('<ul><li style="background: white">hello</li>'
'<li style="background: grey">world</li>'
'<li style="background: white">42</li>'
'</ul>')
def test_singleton():
    """link renders self-closing; script always gets an explicit closing tag."""
    head = html.head(html.link(href="foo"))
    assert unicode(head) == '<head><link href="foo"/></head>'
    head = html.head(html.script(src="foo"))
    assert unicode(head) == '<head><script src="foo"></script></head>'
def test_inline():
h = html.div(html.span('foo'), html.span('bar'))
assert (h.unicode(indent=2) ==
'<div><span>foo</span><span>bar</span></div>')
def test_object_tags():
o = html.object(html.object())
assert o.unicode(indent=0) == '<object><object></object></object>'
| mpl-2.0 |
BEUTIFULSKIN/python-design | Composite/yiwang_composite.py | 1 | 1720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-03 16:00:12
# @Author : Yi Wang (gzytemail@126.com)
# @Link : https://github.com/wang2222qq
# @des : 组合模式
# 主要用于树型结构让你可以优化处理递归或分级数据结构
class Material:
    """Component base class of the composite pattern: Bom (composite) and
    RawMaterial (leaf) share this interface."""

    def __init__(self, code):
        self.code = code  # identifier of the material / BOM

    def add(self, material_obj):
        """Add a child component; overridden by subclasses."""
        pass

    def remove(self, code):
        """Remove the child identified by *code*; overridden by Bom.

        Bug fix: the original signature was ``remove(code)`` without *self*,
        so every ``obj.remove(...)`` call raised TypeError.
        """
        pass

    def get_composite(self, level):
        """Print this node (and any children) indented by *level* dashes."""
        pass
class RawMaterial(Material):
    """Leaf node: a raw material cannot contain child components."""

    def add(self, material_obj):
        # Leaves reject children.  NOTE(review): SyntaxError is an odd choice
        # for API misuse (TypeError would be conventional); kept unchanged so
        # any caller catching SyntaxError keeps working.
        raise SyntaxError("RawMaterial can't add material_obj")

    def get_composite(self, level):
        """Print this leaf's code prefixed by *level* dashes.

        Bug fix: the Python-2-only ``print x`` statement is replaced with the
        single-argument call form, which behaves identically on 2 and 3.
        """
        print("-" * level + self.code)
class Bom(Material):
    """Composite node: a bill of materials holding child components."""

    def __init__(self, code):
        self.code = code
        self._composite = []  # children as {'code': ..., 'obj': ...} records

    def add(self, material_obj):
        """Append a child component (raw material or nested Bom)."""
        self._composite.append({'code': material_obj.code,
                                'obj': material_obj})

    def remove(self, code):
        """Remove the first child whose code equals *code*.

        Bug fixes vs. the original:
        * the signature was missing *self*, so calls raised TypeError;
        * ``list.remove(index)`` removes by VALUE, not position, and would
          raise ValueError here -- we delete by index instead.
        """
        for index, item in enumerate(self._composite):
            if code == item['code']:
                del self._composite[index]
                print("remove raw material:%s from bom:%s OK" % (code, self.code))
                break

    def get_composite(self, level):
        """Print this BOM's code, then each child indented two dashes deeper."""
        print("-" * level + self.code)
        for item in self._composite:
            item['obj'].get_composite(level + 2)
def client():
    """Demo: build a two-level BOM tree and print its structure."""
    root = Bom('BOM00001')
    root.add(RawMaterial(u'螺丝钉'))
    root.add(RawMaterial(u'螺母'))
    sub_assembly = Bom('BOM00002')
    sub_assembly.add(RawMaterial(u'乳胶'))
    root.add(sub_assembly)
    root.get_composite(1)
if __name__ == '__main__':
client() | mit |
wdwvt1/qiime | tests/test_beta_metrics.py | 15 | 11751 | #!/usr/bin/env python
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "justin kuczynski"
__email__ = "justinak@gmail.com"
"""Contains tests for beta_metrics functions."""
import os.path
import numpy
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from cogent.maths.unifrac.fast_unifrac import fast_unifrac
from qiime.parse import make_envs_dict
from qiime.beta_metrics import (
_reorder_unifrac_res,
make_unifrac_metric,
make_unifrac_row_metric)
from qiime.parse import parse_newick
from cogent.core.tree import PhyloNode
from cogent.maths.unifrac.fast_tree import (unifrac)
import warnings
class FunctionTests(TestCase):
    def setUp(self):
        """Create shared fixtures: a 19-sample x 9-taxon table, names, a tree."""
        # Abundance matrix: rows are samples, columns are taxa; abundance
        # shifts gradually from tax1 toward endbigtaxon down the rows.
        self.l19_data = numpy.array([
            [7, 1, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 1, 0, 0, 0],
            [2, 4, 0, 0, 0, 1, 0, 0, 0],
            [1, 7, 0, 0, 0, 0, 0, 0, 0],
            [0, 8, 0, 0, 0, 0, 0, 0, 0],
            [0, 7, 1, 0, 0, 0, 0, 0, 0],
            [0, 4, 2, 0, 0, 0, 2, 0, 0],
            [0, 2, 4, 0, 0, 0, 1, 0, 0],
            [0, 1, 7, 0, 0, 0, 0, 0, 0],
            [0, 0, 8, 0, 0, 0, 0, 0, 0],
            [0, 0, 7, 1, 0, 0, 0, 0, 0],
            [0, 0, 4, 2, 0, 0, 0, 3, 0],
            [0, 0, 2, 4, 0, 0, 0, 1, 0],
            [0, 0, 1, 7, 0, 0, 0, 0, 0],
            [0, 0, 0, 8, 0, 0, 0, 0, 0],
            [0, 0, 0, 7, 1, 0, 0, 0, 0],
            [0, 0, 0, 4, 2, 0, 0, 0, 4],
            [0, 0, 0, 2, 4, 0, 0, 0, 1],
            [0, 0, 0, 1, 7, 0, 0, 0, 0]
        ])
        self.l19_sample_names = [
            'sam1', 'sam2', 'sam3', 'sam4', 'sam5', 'sam6',
            'sam7', 'sam8', 'sam9', 'sam_middle', 'sam11', 'sam12', 'sam13',
            'sam14', 'sam15', 'sam16', 'sam17', 'sam18', 'sam19']
        self.l19_taxon_names = ['tax1', 'tax2', 'tax3', 'tax4', 'endbigtaxon',
                                'tax6', 'tax7', 'tax8', 'tax9']
        # Newick tree covering all nine taxa above.
        self.l19_treestr = '((((tax7:0.1,tax3:0.2):.98,tax8:.3, tax4:.3):.4, ' +\
                           '((tax1:0.3, tax6:.09):0.43,tax2:0.4):0.5):.2,' +\
                           '(tax9:0.3, endbigtaxon:.08));'
    def test_reorder_unifrac_res(self):
        """_reorder_unifrac_res should correctly reorder a misordered 3x3 matrix."""
        mtx = numpy.array([[1, 2, 3],
                           [4, 5, 6],
                           [7, 8, 9]], 'float')
        # Same distances, but rows/columns for the last two samples swapped.
        unifrac_mtx = numpy.array([[1, 3, 2],
                                   [7, 9, 8],
                                   [4, 6, 5]], 'float')
        sample_names = ['yo', "it's", "samples"]
        unifrac_sample_names = ['yo', "samples", "it's"]
        reordered_mtx = _reorder_unifrac_res(
            [unifrac_mtx, unifrac_sample_names],
            sample_names)
        assert_almost_equal(reordered_mtx, mtx)
    def test_make_unifrac_metric(self):
        """Exercise the unweighted unifrac metric; should not throw errors."""
        tree = parse_newick(self.l19_treestr, PhyloNode)
        unif = make_unifrac_metric(False, unifrac, True)
        res = unif(self.l19_data, self.l19_taxon_names, tree,
                   self.l19_sample_names)
        # Cross-check against the reference fast_unifrac implementation.
        envs = make_envs_dict(self.l19_data, self.l19_sample_names,
                              self.l19_taxon_names)
        unifrac_mat, unifrac_names = fast_unifrac(tree, envs,
                                                  modes=['distance_matrix'])['distance_matrix']
        assert_almost_equal(res, _reorder_unifrac_res([unifrac_mat,
                                                       unifrac_names], self.l19_sample_names))
        self.assertEqual(res[0, 0], 0)        # self-distance is zero
        self.assertEqual(res[0, 3], 0.0)      # sam1/sam4 share the same taxa set
        self.assertNotEqual(res[0, 1], 1.0)   # overlapping samples aren't maximal
def test_make_unifrac_metric2(self):
""" samples with no seqs, and identical samples, should behave correctly
"""
tree = parse_newick(self.l19_treestr, PhyloNode)
unif = make_unifrac_metric(False, unifrac, True)
otu_data = numpy.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0], # sam1 zeros
[4, 2, 0, 0, 0, 1, 0, 0, 0],
[2, 4, 0, 0, 0, 1, 0, 0, 0],
[1, 7, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 7, 1, 0, 0, 0, 0, 0, 0],
[0, 4, 2, 0, 0, 0, 2, 0, 0],
[0, 2, 4, 0, 0, 0, 1, 0, 0],
[0, 1, 7, 0, 0, 0, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 7, 1, 0, 0, 0, 0, 0],
[0, 0, 4, 2, 0, 0, 0, 3, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], # sam14 zeros
[0, 0, 0, 8, 0, 0, 0, 0, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0], # sam 16 now like sam 13
[0, 0, 0, 4, 2, 0, 0, 0, 4],
[0, 0, 0, 2, 4, 0, 0, 0, 1],
[0, 0, 0, 1, 7, 0, 0, 0, 0]
])
warnings.filterwarnings('ignore')
res = unif(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names)
envs = make_envs_dict(self.l19_data, self.l19_sample_names,
self.l19_taxon_names)
self.assertEqual(res[0, 0], 0)
self.assertEqual(res[0, 13], 0.0)
self.assertEqual(res[12, 15], 0.0)
self.assertEqual(res[0, 1], 1.0)
warnings.resetwarnings()
def test_make_unifrac_metric3(self):
treestr = '((((tax7:0.1):.98,tax8:.3, tax4:.3):.4, ' +\
'((tax6:.09):0.43):0.5):.2,' +\
'(tax9:0.3, endbigtaxon:.08));' # taxa 1,2,3 removed
tree = parse_newick(treestr, PhyloNode)
otu_data = numpy.array([
[7, 1, 0, 0, 0, 0, 0, 0, 0], # 1 now zeros
[4, 2, 0, 0, 0, 1, 0, 0, 0],
[2, 4, 0, 0, 0, 1, 0, 0, 0],
[1, 7, 0, 0, 0, 0, 0, 0, 0], # 4 now zeros
[0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 7, 1, 0, 0, 0, 0, 0, 0],
[0, 4, 2, 0, 0, 0, 2, 0, 0],
[0, 2, 4, 0, 0, 0, 1, 0, 0],
[0, 1, 7, 0, 0, 0, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 7, 1, 0, 0, 0, 0, 0],
[0, 0, 4, 2, 0, 0, 0, 3, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0],
[0, 0, 1, 7, 0, 0, 0, 0, 0],
[0, 0, 0, 8, 0, 0, 0, 0, 0],
[0, 0, 0, 7, 1, 0, 0, 0, 0],
[0, 0, 0, 4, 2, 0, 0, 0, 4],
[0, 0, 0, 2, 4, 0, 0, 0, 1],
[0, 0, 0, 1, 7, 0, 0, 0, 0]
])
unif = make_unifrac_metric(False, unifrac, True)
warnings.filterwarnings('ignore')
res = unif(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names)
warnings.resetwarnings()
envs = make_envs_dict(self.l19_data, self.l19_sample_names,
self.l19_taxon_names)
self.assertEqual(res[0, 0], 0)
self.assertEqual(res[0, 3], 0.0)
self.assertEqual(res[0, 1], 1.0)
def test_make_unifrac_row_metric3(self):
treestr = '((((tax7:0.1):.98,tax8:.3, tax4:.3):.4, ' +\
'((tax6:.09):0.43):0.5):.2,' +\
'(tax9:0.3, endbigtaxon:.08));' # taxa 1,2,3 removed
tree = parse_newick(treestr, PhyloNode)
otu_data = numpy.array([
[7, 1, 0, 0, 0, 0, 0, 0, 0], # 1 now zeros
[4, 2, 0, 0, 0, 1, 0, 0, 0],
[2, 4, 0, 0, 0, 1, 0, 0, 0],
[1, 7, 0, 0, 0, 0, 0, 0, 0], # 4 now zeros
[0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 7, 1, 0, 0, 0, 0, 0, 0],
[0, 4, 2, 0, 0, 0, 2, 0, 0],
[0, 2, 4, 0, 0, 0, 1, 0, 0],
[0, 1, 7, 0, 0, 0, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 7, 1, 0, 0, 0, 0, 0],
[0, 0, 4, 2, 0, 0, 0, 3, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0],
[0, 0, 1, 7, 0, 0, 0, 0, 0],
[0, 0, 0, 8, 0, 0, 0, 0, 0],
[0, 0, 0, 7, 1, 0, 0, 0, 0],
[0, 0, 0, 4, 2, 0, 0, 0, 4],
[0, 0, 0, 2, 4, 0, 0, 0, 1],
[0, 0, 0, 1, 7, 0, 0, 0, 0]
])
unif = make_unifrac_metric(False, unifrac, True)
warnings.filterwarnings('ignore')
res = unif(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names)
warnings.resetwarnings()
envs = make_envs_dict(self.l19_data, self.l19_sample_names,
self.l19_taxon_names)
self.assertEqual(res[0, 0], 0)
self.assertEqual(res[0, 3], 0.0)
self.assertEqual(res[0, 1], 1.0)
warnings.filterwarnings('ignore')
unif_row = make_unifrac_row_metric(False, unifrac, True)
for i, sam_name in enumerate(self.l19_sample_names):
if i in [0, 3, 4, 5, 8, 9]:
continue
# these have no data and are warned "meaningless".
# I Would prefer if they matched res anyway though
res_row = unif_row(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names, sam_name)
for j in range(len(self.l19_sample_names)):
if j in [0, 3, 4, 5, 8, 9]:
continue # ok if meaningless number in zero sample
self.assertAlmostEqual(res_row[j], res[i, j])
warnings.resetwarnings()
def test_make_unifrac_row_metric2(self):
""" samples with no seqs, and identical samples, should behave correctly
"""
tree = parse_newick(self.l19_treestr, PhyloNode)
unif = make_unifrac_metric(False, unifrac, True)
otu_data = numpy.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0], # sam1 zeros
[4, 2, 0, 0, 0, 1, 0, 0, 0],
[2, 4, 0, 0, 0, 1, 0, 0, 0],
[1, 7, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 7, 1, 0, 0, 0, 0, 0, 0],
[0, 4, 2, 0, 0, 0, 2, 0, 0],
[0, 2, 4, 0, 0, 0, 1, 0, 0],
[0, 1, 7, 0, 0, 0, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0],
[0, 0, 7, 1, 0, 0, 0, 0, 0],
[0, 0, 4, 2, 0, 0, 0, 3, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], # sam14 zeros
[0, 0, 0, 8, 0, 0, 0, 0, 0],
[0, 0, 2, 4, 0, 0, 0, 1, 0], # sam 16 now like sam 13
[0, 0, 0, 4, 2, 0, 0, 0, 4],
[0, 0, 0, 2, 4, 0, 0, 0, 1],
[0, 0, 0, 1, 7, 0, 0, 0, 0]
])
warnings.filterwarnings('ignore')
res = unif(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names)
envs = make_envs_dict(self.l19_data, self.l19_sample_names,
self.l19_taxon_names)
self.assertEqual(res[0, 0], 0)
self.assertEqual(res[0, 13], 0.0)
self.assertEqual(res[12, 15], 0.0)
self.assertEqual(res[0, 1], 1.0)
warnings.resetwarnings()
warnings.filterwarnings('ignore')
unif_row = make_unifrac_row_metric(False, unifrac, True)
for i, sam_name in enumerate(self.l19_sample_names):
if i in [0]:
continue
# these have no data and are warned "meaningless".
# I Would prefer if they matched res anyway though
res_row = unif_row(otu_data, self.l19_taxon_names, tree,
self.l19_sample_names, sam_name)
for j in range(len((self.l19_sample_names))):
if j in [0]:
continue # ok if meaningless number in zero sample
self.assertEqual(res_row[j], res[i, j])
warnings.resetwarnings()
# run tests if called from command line
if __name__ == '__main__':
main()
| gpl-2.0 |
juliushaertl/i3pystatus | i3pystatus/core/util.py | 1 | 17760 | import collections
import functools
import re
import socket
import string
import inspect
from threading import Timer, RLock
def lchop(string, prefix):
    """Strip *prefix* from the front of *string* if it is present.

    :param string: string that may begin with ``prefix``
    :param prefix: leading substring to drop
    :returns: ``string`` with one leading ``prefix`` removed, or the
        unchanged string when it does not start with ``prefix``
    """
    return string[len(prefix):] if string.startswith(prefix) else string
def popwhile(predicate, iterable):
    """Yield items popped off the end of *iterable* while *predicate* holds.

    The sequence is consumed destructively via ``pop()``.  Iteration stops
    at the first popped item for which the predicate is false; that item
    is consumed but not yielded.

    :param predicate: callable taking an item and returning bool
    :param iterable: mutable sequence supporting ``pop()``
    :returns: generator of items
    """
    while iterable:
        candidate = iterable.pop()
        if not predicate(candidate):
            break
        yield candidate
def partition(iterable, limit, key=lambda x: x):
    """Greedily split *iterable* into chunks whose key-sums reach *limit*.

    The list is sorted in descending order and then consumed from its
    (smallest) end; each chunk keeps taking items until the running
    key-sum is no longer below ``limit``.  ``iterable`` is emptied in
    place.

    :param iterable: mutable list to partition (consumed destructively)
    :param limit: threshold the accumulated key values of a chunk reach
    :param key: callable mapping an item to a numeric weight
    :returns: list of chunks (lists)
    """
    def take_chunk():
        # Mirror of the original generator: the item that pushes the sum
        # past the limit is still included in the chunk.
        total = 0.0
        chunk = []
        while total < limit and iterable:
            total += key(iterable[-1])
            chunk.append(iterable.pop())
        return chunk

    iterable.sort(reverse=True)
    chunks = []
    while iterable:
        chunks.append(take_chunk())
    return chunks
def round_dict(dic, places):
    """
    Round every value of *dic* in place.

    :param dic: dict whose values are all numeric
    :param places: number of decimal places, or ``None`` to round to int
    """
    for key, value in dic.items():
        dic[key] = round(value) if places is None else round(value, places)
class ModuleList(collections.UserList):
    """List of status modules that instantiates and registers on append.

    :param status_handler: object handed to each module's ``registered()``
        callback when the module is added
    :param class_finder: helper whose ``instanciate_class_from_module()``
        resolves whatever is passed to :meth:`append` into a module
        instance (presumably a name, class, or instance -- see the finder)
    """

    def __init__(self, status_handler, class_finder):
        self.status_handler = status_handler
        self.finder = class_finder
        super().__init__()

    def append(self, module, *args, **kwargs):
        # Resolve `module` to an instance first, then let it hook itself
        # up to the status handler before storing it.
        module = self.finder.instanciate_class_from_module(
            module, *args, **kwargs)
        module.registered(self.status_handler)
        super().append(module)
        return module

    def get(self, find_id):
        """Return the contained module whose ``id()`` equals *find_id*.

        Returns None when no module matches.
        """
        find_id = int(find_id)
        for module in self:
            if id(module) == find_id:
                return module
class KeyConstraintDict(collections.UserDict):
    """Dict restricted to a set of valid keys, some of which are required.

    Assigning a key outside ``valid_keys`` raises :exc:`KeyError`.
    Iterating raises :exc:`KeyConstraintDict.MissingKeys` while any
    required key has not been set yet.

    :param valid_keys: set of keys that may be assigned
    :param required_keys: subset of ``valid_keys`` that must be set
        before iteration succeeds
    """

    class MissingKeys(Exception):
        """Raised on iteration while required keys are still unset."""

        def __init__(self, keys):
            self.keys = keys

    def __init__(self, valid_keys, required_keys):
        super().__init__()
        self.valid_keys = valid_keys
        self.required_keys = set(required_keys)
        self.seen_keys = set()

    def __setitem__(self, key, value):
        """Assign *key*; reject anything outside ``valid_keys``."""
        if key not in self.valid_keys:
            raise KeyError(key)
        self.seen_keys.add(key)
        self.data[key] = value

    def __delitem__(self, key):
        self.seen_keys.remove(key)
        del self.data[key]

    def __iter__(self):
        """Iterate over keys, or raise MissingKeys if any required key is unset."""
        unset = self.missing()
        if unset:
            raise self.MissingKeys(unset)
        return iter(self.data)

    def missing(self):
        """Return the set of required keys that have not been set."""
        return self.required_keys - (self.seen_keys & self.required_keys)
def convert_position(pos, json):
    """Translate a possibly negative insert position into a real index.

    Negative values count from the end of *json*, with ``-1`` meaning
    "append after the last element".

    :param pos: integer position, may be negative
    :param json: sequence the position refers to
    :returns: non-negative index
    """
    if pos >= 0:
        return pos
    return len(json) + pos + 1
def flatten(l):
    """
    Flatten a hierarchy of nested lists into a single flat list, in order.

    Only ``list`` instances are flattened; any other iterable (strings,
    tuples, ...) is kept as a single element.

    :param l: iterable of arbitrary items and (possibly nested) lists
    :returns: flat list of arbitrary items
    """
    flat = []
    for item in list(l):
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def formatp(string, **kwargs):
    """
    Function for advanced format strings with partial formatting

    This function consumes format strings with groups enclosed in brackets. A
    group enclosed in brackets will only become part of the result if all fields
    inside the group evaluate True in boolean contexts.

    Groups can be nested. The fields in a nested group do not count as fields in
    the enclosing group, i.e. the enclosing group will evaluate to an empty
    string even if a nested group would be eligible for formatting. Nesting is
    thus equivalent to a logical or of all enclosing groups with the enclosed
    group.

    Escaped brackets, i.e. \\\\[ and \\\\] are copied verbatim to output.

    :param string: Format string
    :param kwargs: keyword arguments providing data for the format string
    :returns: Formatted string
    """
    def build_stack(string):
        """
        Builds a stack with OpeningBracket, ClosingBracket and String tokens.
        Tokens have a level property denoting their nesting level.
        They also have a string property containing associated text (empty for
        all tokens but String tokens).
        """
        class Token:
            string = ""

        class OpeningBracket(Token):
            pass

        class ClosingBracket(Token):
            pass

        class String(Token):
            def __init__(self, str):
                self.string = str

        # Mapping of special characters to their token classes; everything
        # else is accumulated into String tokens.
        TOKENS = {
            "[": OpeningBracket,
            "]": ClosingBracket,
        }

        stack = []

        # Index of next unconsumed char
        next = 0
        # Last consumed char
        prev = ""
        # Current char
        char = ""
        # Current level
        level = 0

        while next < len(string):
            prev = char
            char = string[next]
            next += 1

            # A bracket is special only when not escaped by a backslash.
            if prev != "\\" and char in TOKENS:
                token = TOKENS[char]()
                token.index = next
                # "]" closes the level it is on; "[" opens a level for
                # whatever follows it -- hence the asymmetric adjustment.
                if char == "]":
                    level -= 1
                token.level = level
                if char == "[":
                    level += 1
                stack.append(token)
            else:
                # Grow the current String token, or start a new one.
                if stack and isinstance(stack[-1], String):
                    stack[-1].string += char
                else:
                    token = String(char)
                    token.level = level
                    stack.append(token)

        return stack

    def build_tree(items, level=0):
        """
        Builds a list-of-lists tree (in forward order) from a stack (reversed order),
        and formats the elements on the fly, discarding everything not eligible for
        inclusion.
        """
        subtree = []
        while items:
            # Collect everything more deeply nested than us and recurse.
            nested = []
            while items[0].level > level:
                nested.append(items.pop(0))
            if nested:
                subtree.append(build_tree(nested, level + 1))

            item = items.pop(0)
            if item.string:
                string = item.string
                if level == 0:
                    # Top level: always formatted, no field check.
                    subtree.append(string.format(**kwargs))
                else:
                    # Inside a group: include only if every referenced
                    # field is truthy; otherwise discard the whole group.
                    fields = re.findall(r"({(\w+)[^}]*})", string)
                    successful_fields = 0
                    for fieldspec, fieldname in fields:
                        if kwargs.get(fieldname, False):
                            successful_fields += 1

                    if successful_fields == len(fields):
                        subtree.append(string.format(**kwargs))
                    else:
                        return []
        return subtree

    def merge_tree(items):
        # Join the (possibly nested) results and unescape \[ and \].
        return "".join(flatten(items)).replace("\]", "]").replace("\[", "[")

    stack = build_stack(string)
    tree = build_tree(stack, 0)
    return merge_tree(tree)
class TimeWrapper:
    """
    A wrapper that implements __format__ and __bool__ for time spans.

    Format string syntax:

    * ``%h``, ``%m`` and ``%s``: hours, minutes, seconds without leading
      zeros (minutes and seconds range 0-59)
    * ``%H``, ``%M`` and ``%S``: the same, zero-padded to two digits
    * ``%l`` and ``%L``: hours non-padded / padded, but the empty string
      when the hour count is zero
    * ``%%``: a literal percent sign
    * ``%E`` (only at the very start of the spec): render the empty
      string when the whole span is zero; otherwise it is just removed

    The formatted result is stripped of leading/trailing whitespace.

    :param seconds: time span in seconds (numeric, truncated to int)
    :param default_format: spec used when __format__ receives an empty one
    """

    class TimeTemplate(string.Template):
        # "%h"-style placeholders; identifiers are single letters only.
        delimiter = "%"
        idpattern = r"[a-zA-Z]"

    def __init__(self, seconds, default_format="%m:%S"):
        self.seconds = int(seconds)
        self.default_format = default_format

    def __bool__(self):
        """False exactly when the span is zero seconds."""
        return self.seconds != 0

    def __format__(self, format_spec):
        """Render the span according to *format_spec* (or the default)."""
        spec = format_spec if format_spec else self.default_format
        if spec.startswith("%E"):
            if not self.seconds:
                return ""
            spec = spec[2:]

        hours = self.seconds // 3600
        minutes, seconds = divmod(self.seconds % 3600, 60)
        fields = {
            "h": hours, "m": minutes, "s": seconds,
            "H": "%02d" % hours,
            "M": "%02d" % minutes,
            "S": "%02d" % seconds,
            "l": hours if hours else "",
            "L": "%02d" % hours if hours else "",
        }
        return self.TimeTemplate(spec).substitute(**fields).strip()
def require(predicate):
    """Decorator factory: run the wrapped callable only if *predicate*() holds.

    If the predicate evaluates false at call time, the wrapped callable
    is not invoked and None is returned instead.

    :param predicate: zero-argument callable returning a truth value
    :returns: method decorator

    .. seealso::

        :py:class:`internet`
    """
    def decorator(method):
        @functools.wraps(method)
        def guarded(*args, **kwargs):
            if not predicate():
                return None
            return method(*args, **kwargs)
        return guarded
    return decorator
class internet:
    """
    Checks for internet connection by connecting to a server.

    Used server is determined by the `address` class variable which consists of
    server host name and port number.

    :rtype: bool

    .. seealso::

        :py:func:`require`
    """
    # (host, port) probed on every instantiation; port 53 is DNS.
    address = ("google-public-dns-a.google.com", 53)

    def __new__(cls):
        # __new__ is (ab)used so that `internet()` evaluates directly to a
        # bool instead of an instance; connect timeout is 1 second.
        try:
            socket.create_connection(cls.address, 1).close()
            return True
        except OSError:
            return False
def make_graph(values, lower_limit=0.0, upper_limit=100.0, style="blocks"):
    """
    Draws a graph made of unicode characters.

    :param values: An array of values to graph.
    :param lower_limit: Minimum value for the y axis (or None for dynamic).
    :param upper_limit: Maximum value for the y axis (or None for dynamic).
    :param style: Drawing style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake').
    :returns: Bar as a string
    """
    values = [float(n) for n in values]
    mn, mx = min(values), max(values)
    # Fixed limits only widen the range; they never clip the data.
    mn = mn if lower_limit is None else min(mn, float(lower_limit))
    mx = mx if upper_limit is None else max(mx, float(upper_limit))
    extent = mx - mn

    if style == 'blocks':
        # One block glyph per value, scaled into the 9-step ramp.
        bar = '_▁▂▃▄▅▆▇█'
        bar_count = len(bar) - 1
        if extent == 0:
            graph = '_' * len(values)
        else:
            graph = ''.join(bar[int((n - mn) / extent * bar_count)] for n in values)
    elif style in ['braille-fill', 'braille-peak', 'braille-snake']:
        # idea from https://github.com/asciimoo/drawille
        # unicode values from http://en.wikipedia.org/wiki/Braille
        # Each braille character encodes two adjacent values (two dot
        # columns), so pad to an even count with the minimum.

        vpad = values if len(values) % 2 == 0 else values + [mn]
        # Quantize every value to 0..4 (number of dots in a column).
        vscale = [round(4 * (vp - mn) / extent) for vp in vpad]
        l = len(vscale) // 2

        # do the 2-character collapse separately for clarity
        if 'fill' in style:
            # Bit patterns for a column filled from the bottom up.
            vbits = [[0, 0x40, 0x44, 0x46, 0x47][vs] for vs in vscale]
        elif 'peak' in style:
            # Only the topmost dot of each column is set.
            vbits = [[0, 0x40, 0x04, 0x02, 0x01][vs] for vs in vscale]
        else:
            assert('snake' in style)
            # Connect each column to its neighbours so the line is
            # continuous.
            # there are a few choices for what to put last in vb2.
            # arguable vscale[-1] from the _previous_ call is best.
            vb2 = [vscale[0]] + vscale + [0]
            vbits = []
            for i in range(1, l + 1):
                c = 0
                for j in range(min(vb2[i - 1], vb2[i], vb2[i + 1]), vb2[i] + 1):
                    c |= [0, 0x40, 0x04, 0x02, 0x01][j]
                vbits.append(c)

        # 2-character collapse: merge column pairs into one braille char.
        graph = ''
        for i in range(0, l, 2):
            b1 = vbits[i]
            b2 = vbits[i + 1]
            # Shift the right-hand column into the right-dot bit positions.
            if b2 & 0x40:
                b2 = b2 - 0x30
            b2 = b2 << 3
            graph += chr(0x2800 + b1 + b2)
    else:
        raise NotImplementedError("Graph drawing style '%s' unimplemented." % style)
    return graph
def make_vertical_bar(percentage, width=1):
    """
    Draws a vertical bar made of unicode characters.

    :param percentage: A value between 0 and 100 (int or float); values
        outside that range are clamped to the first/last glyph.
    :param width: How many characters wide the bar should be.
    :returns: Bar as a string
    """
    bar = ' _▁▂▃▄▅▆▇█'
    # int() is required: floor-dividing a float yields a float, which is
    # not a valid sequence index (the original crashed on float input).
    index = int(percentage // 10)
    if index < 0:
        output = bar[0]
    elif index >= len(bar):
        output = bar[-1]
    else:
        output = bar[index]
    return output * width
def make_bar(percentage):
    """
    Draws a horizontal bar made of unicode box characters.

    :param percentage: A value between 0 and 100
    :returns: 10-character bar string
    """
    # Partial-block glyphs indexed by the remainder digit (1-9); glyph
    # table reproduced exactly from the original rendering.
    bars = [' ', '▏', '▎', '▍', '▌', '▋', '▋', '▊', '▊', '█']
    tens = int(percentage / 10)
    ones = int(percentage) - tens * 10
    segments = ['█'] * tens
    if ones >= 1:
        segments.append(bars[ones])
    filled = ''.join(segments)
    return filled + ' ' * (10 - len(filled))
def user_open(url_or_command):
    """Open the specified parameter in the web browser if a URL is detected,
    otherwise pass the parameter to the shell as a subprocess. This function
    is intended to be used in on_leftclick/on_rightclick callbacks.

    :param url_or_command: String containing URL or command
    """
    from urllib.parse import urlparse
    scheme = urlparse(url_or_command).scheme
    if scheme == 'http' or scheme == 'https':
        import webbrowser
        import os
        # webbrowser.open() sometimes prints a message for some reason and confuses i3
        # Redirect stdout briefly to prevent this from happening.
        savout = os.dup(1)
        os.close(1)
        os.open(os.devnull, os.O_RDWR)
        try:
            webbrowser.open(url_or_command)
        finally:
            # Restore the original stdout file descriptor.
            os.dup2(savout, 1)
    else:
        # Fire-and-forget: the subprocess is not waited on.
        import subprocess
        subprocess.Popen(url_or_command, shell=True)
class MultiClickHandler(object):
    """Distinguish single from double clicks using a timer.

    A click arms a timer via :meth:`set_timer`; if no second click on the
    same button arrives within ``timeout`` seconds, the stored callback
    fires from the timer thread.  :meth:`check_double` tells the caller
    whether the current click completes a double click.
    All state transitions are guarded by an RLock because the Timer
    callback runs on a separate thread.
    """

    def __init__(self, callback_handler, timeout):
        self.callback_handler = callback_handler
        self.timeout = timeout
        self.lock = RLock()

        # Monotonically increasing id used to invalidate stale timers.
        self._timer_id = 0
        self.timer = None
        self.button = None
        self.cb = None

    def set_timer(self, button, cb):
        """Arm (or re-arm) the single-click timer for *button*/*cb*."""
        with self.lock:
            self.clear_timer()

            self.timer = Timer(self.timeout,
                               self._timer_function,
                               args=[self._timer_id])
            self.button = button
            self.cb = cb

            self.timer.start()

    def clear_timer(self):
        """Cancel any pending timer and forget the stored click."""
        with self.lock:
            if self.timer is None:
                return

            self._timer_id += 1  # Invalidate existent timer
            self.timer.cancel()  # Cancel the existent timer

            self.timer = None
            self.button = None
            self.cb = None

    def _timer_function(self, timer_id):
        # Runs on the Timer thread; a mismatched id means the timer was
        # invalidated after this callback was scheduled.
        with self.lock:
            if self._timer_id != timer_id:
                return
            self.callback_handler(self.button, self.cb)
            self.clear_timer()

    def check_double(self, button):
        """Return True if this click doubles the pending one.

        A click on a *different* button flushes the pending single click
        immediately and is not treated as a double click.
        """
        if self.timer is None:
            return False

        ret = True
        if button != self.button:
            self.callback_handler(self.button, self.cb)
            ret = False

        self.clear_timer()
        return ret
def get_module(function):
    """Function decorator for retrieving the ``self`` argument from the stack.

    Intended for use with callbacks that need access to a modules variables, for example:

    .. code:: python

        from i3pystatus import Status, get_module
        from i3pystatus.core.command import execute
        status = Status(...)
        # other modules etc.
        @get_module
        def display_ip_verbose(module):
            execute('sh -c "ip addr show dev {dev} | xmessage -file -"'.format(dev=module.interface))
        status.register("network", interface="wlan1", on_leftclick=display_ip_verbose)
    """
    @functools.wraps(function)
    def call_wrapper(*args, **kwargs):
        # Reach into the caller's frame to grab its `self` local -- this
        # only works when the wrapper is invoked from an instance method.
        stack = inspect.stack()
        caller_frame_info = stack[1]
        self = caller_frame_info[0].f_locals["self"]
        # not completely sure whether this is necessary
        # see note in Python docs about stack frames (reference cycles)
        del stack
        function(self, *args, **kwargs)
    return call_wrapper
| mit |
willemneal/Docky | lib/unidecode/x029.py | 165 | 3584 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'{', # 0x83
'} ', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
| mit |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/distutils/command/clean.py | 205 | 2776 | """distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean(Command):
    """Distutils command that removes temporary build by-products."""

    description = "clean up temporary files from 'build' command"
    user_options = [
        ('build-base=', 'b',
         "base build directory (default: 'build.build-base')"),
        ('build-lib=', None,
         "build directory for all modules (default: 'build.build-lib')"),
        ('build-temp=', 't',
         "temporary build directory (default: 'build.build-temp')"),
        ('build-scripts=', None,
         "build directory for scripts (default: 'build.build-scripts')"),
        ('bdist-base=', None,
         "temporary directory for built distributions"),
        ('all', 'a',
         "remove all build output, not just temporary by-products")
    ]

    boolean_options = ['all']

    def initialize_options(self):
        # All options start unset; finalize_options() fills them in from
        # the 'build' and 'bdist' commands below.
        self.build_base = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.bdist_base = None
        self.all = None

    def finalize_options(self):
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('build_scripts', 'build_scripts'),
                                   ('build_temp', 'build_temp'))
        self.set_undefined_options('bdist',
                                   ('bdist_base', 'bdist_base'))

    def run(self):
        # remove the build/temp.<plat> directory (unless it's already
        # gone)
        if os.path.exists(self.build_temp):
            remove_tree(self.build_temp, dry_run=self.dry_run)
        else:
            log.debug("'%s' does not exist -- can't clean it",
                      self.build_temp)

        if self.all:
            # remove build directories
            for directory in (self.build_lib,
                              self.bdist_base,
                              self.build_scripts):
                if os.path.exists(directory):
                    remove_tree(directory, dry_run=self.dry_run)
                else:
                    log.warn("'%s' does not exist -- can't clean it",
                             directory)

        # just for the heck of it, try to remove the base build directory:
        # we might have emptied it right now, but if not we don't care
        if not self.dry_run:
            try:
                os.rmdir(self.build_base)
                log.info("removing '%s'", self.build_base)
            except OSError:
                pass
| gpl-2.0 |
impowski/servo | tests/wpt/css-tests/tools/wptserve/wptserve/router.py | 316 | 5895 | import itertools
import re
import types
from logger import get_logger
any_method = object()
class RouteTokenizer(object):
    """Tokenize a route pattern into (kind, value) tuples.

    Token kinds: ``"slash"`` (value None), ``"literal"`` (verbatim text),
    ``"group"`` (the name inside ``{}``), and ``"star"`` (value ``""``).
    """

    def literal(self, scanner, token):
        # Verbatim text between special characters.
        return ("literal", token)

    def slash(self, scanner, token):
        return ("slash", None)

    def group(self, scanner, token):
        # "{name}" -> the bare name.
        return ("group", token[1:-1])

    def star(self, scanner, token):
        # "*" carries no payload; the slice always yields "".
        return ("star", token[1:-3])

    def scan(self, input_str):
        """Return ``(tokens, unmatched_remainder)`` for *input_str*."""
        rules = [
            (r"/", self.slash),
            (r"{\w*}", self.group),
            (r"\*", self.star),
            (r"(?:\\.|[^{\*/])*", self.literal),
        ]
        return re.Scanner(rules).scan(input_str)
class RouteCompiler(object):
    """Compile a RouteTokenizer token stream into a compiled regexp.

    ``{name}`` groups become named ``[^/]+`` captures; a ``*`` becomes a
    ``.*`` capture running to the end of the path.  Nothing may follow
    a star.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # True once a "*" has been emitted; later groups/stars are errors.
        self.star_seen = False

    def compile(self, tokens):
        """Build and return the compiled path-matching regexp."""
        self.reset()

        dispatch = {
            "slash": self.process_slash,
            "literal": self.process_literal,
            "group": self.process_group,
            "star": self.process_star,
        }

        parts = ["^"]

        # Every route implicitly starts with a slash.
        if not tokens or tokens[0][0] != "slash":
            tokens = itertools.chain([("slash", None)], tokens)

        for token in tokens:
            parts.append(dispatch[token[0]](token))

        if self.star_seen:
            parts.append(")")

        parts.append("$")

        return re.compile("".join(parts))

    def process_literal(self, token):
        return re.escape(token[1])

    def process_slash(self, token):
        return "/"

    def process_group(self, token):
        if self.star_seen:
            raise ValueError("Group seen after star in regexp")
        return "(?P<%s>[^/]+)" % token[1]

    def process_star(self, token):
        if self.star_seen:
            raise ValueError("Star seen after star in regexp")
        self.star_seen = True
        # Opened here, closed in compile() after the token loop.
        return "(.*"
def compile_path_match(route_pattern):
    """Compile *route_pattern* (slashes, literals, {group}, *) into a regexp.

    :param route_pattern: route pattern string
    :returns: compiled regular expression matching request paths
    :raises AssertionError: if part of the pattern cannot be tokenized
    """
    tokenizer = RouteTokenizer()
    tokens, unmatched = tokenizer.scan(route_pattern)

    # Compare by equality: the previous `unmatched is ""` relied on
    # CPython's interning of the empty string and raises a SyntaxWarning
    # on modern interpreters.
    assert unmatched == "", unmatched

    compiler = RouteCompiler()

    return compiler.compile(tokens)
class Router(object):
    """Object for matching handler functions to requests.

    :param doc_root: Absolute path of the filesystem location from
                     which to serve tests

    :param routes: Initial routes to add; a list of three item tuples
                   (method, path_pattern, handler_function), defined
                   as for register()
    """

    def __init__(self, doc_root, routes):
        self.doc_root = doc_root
        self.routes = []
        self.logger = get_logger()
        # Registered in reverse so that get_handler(), which walks the
        # list backwards, honours the caller's priority order.
        for route in reversed(routes):
            self.register(*route)

    def register(self, methods, path, handler):
        """Register a handler for a set of paths.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.

        :param path_pattern: Match pattern that will be used to determine if
                             a request path matches this route. Match patterns
                             consist of either literal text, match groups,
                             denoted {name}, which match any character except /,
                             and, at most one \*, which matches and character and
                             creates a match group to the end of the string.
                             If there is no leading "/" on the pattern, this is
                             automatically implied. For example::

                                 api/{resource}/*.json

                            Would match `/api/test/data.json` or
                            `/api/test/test2/data.json`, but not `/api/test/data.py`.

                            The match groups are made available in the request object
                            as a dictionary through the route_match property. For
                            example, given the route pattern above and the path
                            `/api/test/data.json`, the route_match property would
                            contain::

                                {"resource": "test", "*": "data.json"}

        :param handler: Function that will be called to process matching
                        requests. This must take two parameters, the request
                        object and the response object.
        """
        # NOTE: types.StringTypes is Python 2 only -- this module targets
        # Python 2.  A bare string means a single method.
        if type(methods) in types.StringTypes or methods in (any_method, "*"):
            methods = [methods]
        for method in methods:
            self.routes.append((method, compile_path_match(path), handler))
            self.logger.debug("Route pattern: %s" % self.routes[-1][1].pattern)

    def get_handler(self, request):
        """Get a handler for a request or None if there is no handler.

        :param request: Request to get a handler for.
        :rtype: Callable or None
        """
        # Later registrations win; HEAD falls back to GET handlers.
        for method, regexp, handler in reversed(self.routes):
            if (request.method == method or
                method in (any_method, "*") or
                (request.method == "HEAD" and method == "GET")):
                m = regexp.match(request.url_parts.path)
                if m:
                    if not hasattr(handler, "__class__"):
                        name = handler.__name__
                    else:
                        name = handler.__class__.__name__
                    self.logger.debug("Found handler %s" % name)

                    # Expose named groups (and the star capture, if any)
                    # to the handler via request.route_match.
                    match_parts = m.groupdict().copy()
                    if len(match_parts) < len(m.groups()):
                        match_parts["*"] = m.groups()[-1]
                    request.route_match = match_parts

                    return handler
        return None
| mpl-2.0 |
kiyoto/statsmodels | statsmodels/sandbox/gam.py | 33 | 15421 | """
Generalized additive models
Requirements for smoothers
--------------------------
smooth(y, weights=xxx) : ? no return ? alias for fit
predict(x=None) : smoothed values, fittedvalues or for new exog
df_fit() : degress of freedom of fit ?
Notes
-----
- using PolySmoother works for AdditiveModel, and GAM with Poisson and Binomial
- testfailure with Gamma, no other families tested
- there is still an indeterminacy in the split up of the constant across
components (smoothers) and alpha, sum, i.e. constant, looks good.
- role of offset, that I haven't tried to figure out yet
Refactoring
-----------
currently result is attached to model instead of other way around
split up Result in class for AdditiveModel and for GAM,
subclass GLMResults, needs verification that result statistics are appropriate
how much inheritance, double inheritance?
renamings and cleanup
interface to other smoothers, scipy splines
basic unittests as support for refactoring exist, but we should have a test
case for gamma and the others. Advantage of PolySmoother is that we can
benchmark against the parametric GLM results.
"""
# JP:
# changes: use PolySmoother instead of crashing bspline
# TODO: check/catalogue required interface of a smoother
# TODO: replace default smoother by corresponding function to initialize
# other smoothers
# TODO: fix iteration, don't define class with iterator methods, use looping;
# add maximum iteration and other optional stop criteria
# fixed some of the dimension problems in PolySmoother,
# now graph for example looks good
# NOTE: example script is now in examples folder
#update: I did some of the above, see module docstring
from statsmodels.compat.python import next, range
import numpy as np
from statsmodels.genmod import families
from statsmodels.sandbox.nonparametric.smoothers import PolySmoother
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.sm_exceptions import IterationLimitWarning, iteration_limit_doc
import warnings
DEBUG = False
def default_smoother(x, s_arg=None):
    '''Create the default PolySmoother for the data in *x*.

    The knot-count heuristic below is taken from R's smooth.spline, but
    note that the computed ``nknots``/``knots`` are not passed to the
    returned PolySmoother (leftover from the earlier spline smoother).

    :param x: 1d array of exog values the smoother is built on
    :param s_arg: polynomial order for PolySmoother (default 3)
    :returns: PolySmoother instance
    '''
#    _x = x.copy()
#    _x.sort()
    _x = np.sort(x)
    n = x.shape[0]
    # taken form smooth.spline in R

    #if n < 50:
    if n < 500:
        nknots = n
    else:
        # log2 interpolation between fixed knot counts, per R's heuristic
        a1 = np.log(50) / np.log(2)
        a2 = np.log(100) / np.log(2)
        a3 = np.log(140) / np.log(2)
        a4 = np.log(200) / np.log(2)
        if n < 200:
            nknots = 2**(a1 + (a2 - a1) * (n - 50)/150.)
        elif n < 800:
            nknots = 2**(a2 + (a3 - a2) * (n - 200)/600.)
        elif n < 3200:
            nknots = 2**(a3 + (a4 - a3) * (n - 800)/2400.)
        else:
            nknots = 200 + (n - 3200.)**0.2
    knots = _x[np.linspace(0, n-1, nknots).astype(np.int32)]

    #s = SmoothingSpline(knots, x=x.copy())
    #when I set order=2, I get nans in the GAM prediction
    if s_arg is None:
        order = 3 #what about knots? need smoother *args or **kwds
    else:
        order = s_arg
    s = PolySmoother(order, x=x.copy()) #TODO: change order, why copy?
#    s.gram(d=2)
#    s.target_df = 5
    return s
class Offset(object):
    """Callable wrapper that adds a constant offset to *fn*'s result.

    :param fn: wrapped callable
    :param offset: constant added to every return value of ``fn``
    """

    def __init__(self, fn, offset):
        self.fn = fn
        self.offset = offset

    def __call__(self, *args, **kw):
        base = self.fn(*args, **kw)
        return base + self.offset
class Results(object):
    """Result container shared by AdditiveModel and GAM.

    Holds the fitted component smoothers plus the constant ``alpha`` and
    exposes prediction helpers.  See the module docstring: the split of
    the constant across smoothers and ``alpha`` and the result/model
    attachment are still pending refactoring.
    """

    def __init__(self, Y, alpha, exog, smoothers, family, offset):
        self.nobs, self.k_vars = exog.shape #assumes exog is 2d
        #weird: If I put the previous line after the definition of self.mu,
        # then the attributed don't get added
        self.Y = Y
        self.alpha = alpha
        self.smoothers = smoothers
        self.offset = offset
        self.family = family
        self.exog = exog
        self.offset = offset  # NOTE(review): duplicate assignment, kept as-is
        self.mu = self.linkinversepredict(exog) #TODO: remove __call__

    def __call__(self, exog):
        '''expected value ? check new GLM, same as mu for given exog
        maybe remove this
        '''
        return self.linkinversepredict(exog)

    def linkinversepredict(self, exog): #TODO what's the name in GLM
        '''expected value ? check new GLM, same as mu for given exog
        '''
        return self.family.link.inverse(self.predict(exog))

    def predict(self, exog):
        '''predict response, sum of smoothed components
        TODO: What's this in the case of GLM, corresponds to X*beta ?
        '''
        #note: sum is here over axis=0,
        #TODO: transpose in smoothed and sum over axis=1
        #BUG: there is some inconsistent orientation somewhere
        #temporary hack, won't work for 1d
        #print dir(self)
        #print 'self.nobs, self.k_vars', self.nobs, self.k_vars
        exog_smoothed = self.smoothed(exog)
        #print 'exog_smoothed.shape', exog_smoothed.shape
        if exog_smoothed.shape[0] == self.k_vars:
            # old (column-variable) orientation still supported but
            # deprecated; see BUG note above
            import warnings
            warnings.warn("old orientation, colvars, will go away",
                          FutureWarning)
            return np.sum(self.smoothed(exog), axis=0) + self.alpha
        if exog_smoothed.shape[1] == self.k_vars:
            return np.sum(exog_smoothed, axis=1) + self.alpha
        else:
            raise ValueError('shape mismatch in predict')

    def smoothed(self, exog):
        '''get smoothed prediction for each component
        '''
        #bug: with exog in predict I get a shape error
        #print 'smoothed', exog.shape, self.smoothers[0].predict(exog).shape
        #there was a mistake exog didn't have column index i
        return np.array([self.smoothers[i].predict(exog[:,i]) + self.offset[i]
        #shouldn't be a mistake because exog[:,i] is attached to smoother, but
        #it is for different exog
        #return np.array([self.smoothers[i].predict() + self.offset[i]
                         for i in range(exog.shape[1])]).T

    def smoothed_demeaned(self, exog):
        """Return (demeaned component matrix, combined constant).

        The per-component means are folded into the constant together
        with ``alpha`` so the components sum to the same prediction.
        """
        components = self.smoothed(exog)
        means = components.mean(0)
        constant = means.sum() + self.alpha
        components_demeaned = components - means
        return components_demeaned, constant
class AdditiveModel(object):
    '''additive model with non-parametric, smoothed components

    Fitted by backfitting: each smoother is iteratively refit on the
    partial residual of the response until the deviance converges.

    Parameters
    ----------
    exog : ndarray
    smoothers : None or list of smoother instances
        smoother instances not yet checked
    weights : None or ndarray
    family : None or family instance
        I think only used because of shared results with GAM and subclassing.
        If None, then Gaussian is used.
    '''

    def __init__(self, exog, smoothers=None, weights=None, family=None):
        self.exog = exog
        if not weights is None:
            self.weights = weights
        else:
            # unweighted case: unit weight for every observation
            self.weights = np.ones(self.exog.shape[0])

        # one smoother per column of exog; default is a polynomial smoother
        self.smoothers = smoothers or [default_smoother(exog[:,i]) for i in range(exog.shape[1])]

        #TODO: why do we set here df, refactoring temporary?
        for i in range(exog.shape[1]):
            self.smoothers[i].df = 10
        if family is None:
            self.family = families.Gaussian()
        else:
            self.family = family
        #self.family = families.Gaussian()

    def _iter__(self):
        '''initialize iteration ?, should be removed

        Resets the iteration counter and deviance before the loop.
        '''
        self.iter = 0
        self.dev = np.inf
        return self

    def next(self):
        '''internal calculation for one fit iteration

        One backfitting sweep over all smoothers.

        BUG: I think this does not improve, what is supposed to improve
            offset doesn't seem to be used, neither an old alpha
            The smoothers keep coef/params from previous iteration
        '''
        _results = self.results
        Y = self.results.Y
        mu = _results.predict(self.exog)
        #TODO offset is never used ?
        offset = np.zeros(self.exog.shape[1], np.float64)
        # weighted mean of the response is used as the intercept
        alpha = (Y * self.weights).sum() / self.weights.sum()
        for i in range(self.exog.shape[1]):
            # partial residual: remove all other components, keep component i
            tmp = self.smoothers[i].predict()
            #TODO: check what smooth needs to do
            #smooth (alias for fit, fit given x to new y and attach
            #print 'next shape', (Y - alpha - mu + tmp).shape
            bad = np.isnan(Y - alpha - mu + tmp).any()
            if bad: #temporary assert while debugging
                print(Y, alpha, mu, tmp)
                raise ValueError("nan encountered")

            #self.smoothers[i].smooth(Y - alpha - mu + tmp,
            self.smoothers[i].smooth(Y - mu + tmp,
                                     weights=self.weights)
            tmp2 = self.smoothers[i].predict() #fittedvalues of previous smooth/fit
            # center component i by shifting its weighted mean into the offset
            self.results.offset[i] = -(tmp2*self.weights).sum() / self.weights.sum()
            #self.offset used in smoothed
            if DEBUG:
                print(self.smoothers[i].params)
            mu += tmp2 - tmp
        #change setting offset here: tests still pass, offset equal to constant
        #in component ??? what's the effect of offset
        offset = self.results.offset
        #print self.iter
        #self.iter += 1 #missing incrementing of iter counter NOT
        return Results(Y, alpha, self.exog, self.smoothers, self.family, offset)

    def cont(self):
        '''condition to continue iteration loop

        Increments the iteration counter and updates the deviance.

        Parameters
        ----------
        tol

        Returns
        -------
        cont : bool
            If true, then iteration should be continued.
        '''
        self.iter += 1 #moved here to always count, not necessary
        if DEBUG:
            print(self.iter, self.results.Y.shape)
            print(self.results.predict(self.exog).shape, self.weights.shape)
        # weighted residual sum of squares serves as the deviance measure
        curdev = (((self.results.Y - self.results.predict(self.exog))**2) * self.weights).sum()

        if self.iter > self.maxiter: #kill it, no max iterationoption
            return False
        # relative change in deviance below tolerance -> converged
        if np.fabs((self.dev - curdev) / curdev) < self.rtol:
            self.dev = curdev
            return False

        #self.iter += 1
        self.dev = curdev
        return True

    def df_resid(self):
        '''degrees of freedom of residuals, ddof is sum of all smoothers df
        '''
        return self.results.Y.shape[0] - np.array([self.smoothers[i].df_fit() for i in range(self.exog.shape[1])]).sum()

    def estimate_scale(self):
        '''estimate standard deviation of residuals
        '''
        #TODO: remove use of self.results.__call__
        return ((self.results.Y - self.results(self.exog))**2).sum() / self.df_resid()

    def fit(self, Y, rtol=1.0e-06, maxiter=30):
        '''fit the model to a given endogenous variable Y

        This needs to change for consistency with statsmodels

        Runs an initial pass over all smoothers, then iterates ``next``
        until ``cont`` signals convergence or maxiter is reached.
        '''
        self.rtol = rtol
        self.maxiter = maxiter
        #iter(self)   # what does this do? anything?
        self._iter__()
        mu = 0
        alpha = (Y * self.weights).sum() / self.weights.sum()

        offset = np.zeros(self.exog.shape[1], np.float64)

        # initial fit: each smoother on the centered response, accumulating mu
        for i in range(self.exog.shape[1]):
            self.smoothers[i].smooth(Y - alpha - mu,
                                     weights=self.weights)
            tmp = self.smoothers[i].predict()
            offset[i] = (tmp * self.weights).sum() / self.weights.sum()
            tmp -= tmp.sum()
            mu += tmp

        self.results = Results(Y, alpha, self.exog, self.smoothers, self.family, offset)

        while self.cont():
            self.results = self.next()

        if self.iter >= self.maxiter:
            warnings.warn(iteration_limit_doc, IterationLimitWarning)

        return self.results
class Model(GLM, AdditiveModel):
#class Model(AdditiveModel):
    #TODO: what does GLM do? Is it actually used ?
    #only used in __init__, dropping it doesn't change results
    #but where gets family attached now? - weird, it's Gaussian in this case now
    #also where is the link defined?
    #AdditiveModel overwrites family and sets it to Gaussian - corrected

    #I think both GLM and AdditiveModel subclassing is only used in __init__

    #niter = 2

#    def __init__(self, exog, smoothers=None, family=family.Gaussian()):
#        GLM.__init__(self, exog, family=family)
#        AdditiveModel.__init__(self, exog, smoothers=smoothers)
#        self.family = family
    def __init__(self, endog, exog, smoothers=None, family=families.Gaussian()):
        # NOTE(review): mutable-ish default (family instance) is shared across
        # calls; the assert below only checks identity with what was passed.
        #self.family = family
        #TODO: inconsistent super __init__
        AdditiveModel.__init__(self, exog, smoothers=smoothers, family=family)
        GLM.__init__(self, endog, exog, family=family)
        assert self.family is family  #make sure we got the right family

    def next(self):
        '''one IRLS step: update weights and working response Z, then refit
        the additive model on Z.
        '''
        _results = self.results
        Y = _results.Y
        if np.isnan(self.weights).all():
            print("nanweights1")

        _results.mu = self.family.link.inverse(_results.predict(self.exog))
        #eta = _results.predict(self.exog)
        #_results.mu = self.family.fitted(eta)
        weights = self.family.weights(_results.mu)
        if np.isnan(weights).all():
            self.weights = weights
            print("nanweights2")
        self.weights = weights
        if DEBUG:
            print('deriv isnan', np.isnan(self.family.link.deriv(_results.mu)).any())

        # working response of the IRLS iteration
        #Z = _results.predict(self.exog) + \
        Z = _results.predict(self.exog) + \
               self.family.link.deriv(_results.mu) * (Y - _results.mu)  #- _results.alpha #?added alpha

        m = AdditiveModel(self.exog, smoothers=self.smoothers,
                          weights=self.weights, family=self.family)

        #TODO: I don't know what the next two lines do, Z, Y ? which is endog?
        #Y is original endog, Z is endog for the next step in the iterative solver

        _results = m.fit(Z)
        self.history.append([Z, _results.predict(self.exog)])
        _results.Y = Y
        _results.mu = self.family.link.inverse(_results.predict(self.exog))
        self.iter += 1
        self.results = _results
        return _results

    def estimate_scale(self, Y=None):
        """
        Return Pearson\'s X^2 estimate of scale.
        """

        if Y is None:
            Y = self.Y
        resid = Y - self.results.mu
        return (np.power(resid, 2) / self.family.variance(self.results.mu)).sum() \
                    / self.df_resid   #TODO check this
                    #/ AdditiveModel.df_resid(self)  #what is the class doing here?

    def fit(self, Y, rtol=1.0e-06, maxiter=30):
        '''fit the GAM by iteratively reweighted least squares: start from
        the family's starting mu, then iterate ``next`` until ``cont``
        signals convergence or maxiter is reached.
        '''
        self.rtol = rtol
        self.maxiter = maxiter
        self.Y = np.asarray(Y, np.float64)
        self.history = []
        #iter(self)
        self._iter__()

        #TODO code duplication with next?
        alpha = self.Y.mean()
        mu0 = self.family.starting_mu(Y)
        # initial working response around the starting values
        #Z = self.family.link(alpha) + self.family.link.deriv(alpha) * (Y - alpha)
        Z = self.family.link(alpha) + self.family.link.deriv(alpha) * (Y - mu0)
        m = AdditiveModel(self.exog, smoothers=self.smoothers, family=self.family)
        self.results = m.fit(Z)
        self.results.mu = self.family.link.inverse(self.results.predict(self.exog))
        self.results.Y = Y

        while self.cont():
            self.results = self.next()
            self.scale = self.results.scale = self.estimate_scale()

        if self.iter >= self.maxiter:
            import warnings
            warnings.warn(iteration_limit_doc, IterationLimitWarning)

        return self.results
| bsd-3-clause |
sysadminmatmoz/ingadhoc | project_issue_order/__openerp__.py | 4 | 1664 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Issue Order',
'version': '8.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Project Issue Order
===================
Add sequence field to issues and change default order to the following criteria:
"priority desc, sequence, date_deadline, duration, create_date desc"
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'project_issue',
],
'data': [
'project_issue_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
} | agpl-3.0 |
40223105/2015cd_0512 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_discovery.py | 785 | 13838 | import os
import re
import sys
import unittest
class TestableTestProgram(unittest.TestProgram):
    # Subclass whose __init__ is a no-op, so the tests can build a
    # TestProgram instance and drive parseArgs/_do_discovery manually
    # without triggering CLI parsing or immediate test execution.
    module = '__main__'
    exit = True
    defaultTest = failfast = catchbreak = buffer = None
    verbosity = 1
    progName = ''
    testRunner = testLoader = None

    def __init__(self):
        # deliberately skip TestProgram.__init__
        pass
class TestDiscovery(unittest.TestCase):
    """Tests for unittest.TestLoader's test-discovery machinery.

    os.listdir / os.path.isfile / os.path.isdir are monkey-patched
    throughout (with addCleanup-based restoration) so no real
    filesystem access happens.
    """

    # Heavily mocked tests so I can avoid hitting the filesystem
    def test_get_name_from_path(self):
        # dotted module name is derived relative to _top_level_dir
        loader = unittest.TestLoader()

        loader._top_level_dir = '/foo'
        name = loader._get_name_from_path('/foo/bar/baz.py')
        self.assertEqual(name, 'bar.baz')

        if not __debug__:
            # asserts are off
            return

        with self.assertRaises(AssertionError):
            loader._get_name_from_path('/bar/baz.py')

    def test_find_tests(self):
        # _find_tests should pick up matching modules and recurse into
        # directories, skipping names that are not valid module names
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
                       'test.foo', 'test-not-a-module.py', 'another_dir'],
                      ['test3.py', 'test4.py', ]]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        def isdir(path):
            return path.endswith('dir')
        os.path.isdir = isdir
        self.addCleanup(restore_isdir)

        def isfile(path):
            # another_dir is not a package and so shouldn't be recursed into
            return not path.endswith('dir') and not 'another_dir' in path
        os.path.isfile = isfile
        self.addCleanup(restore_isfile)

        loader._get_module_from_name = lambda path: path + ' module'
        loader.loadTestsFromModule = lambda module: module + ' tests'

        top_level = os.path.abspath('/foo')
        loader._top_level_dir = top_level
        suite = list(loader._find_tests(top_level, 'test*.py'))

        expected = [name + ' module tests' for name in
                    ('test1', 'test2')]
        expected.extend([('test_dir.%s' % name) + ' module tests' for name in
                    ('test3', 'test4')])
        self.assertEqual(suite, expected)

    def test_find_tests_with_package(self):
        # a package defining load_tests gets that hook called instead of
        # default recursion
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        directories = ['a_directory', 'test_directory', 'test_directory2']
        path_lists = [directories, [], [], []]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        os.path.isdir = lambda path: True
        self.addCleanup(restore_isdir)

        os.path.isfile = lambda path: os.path.basename(path) not in directories
        self.addCleanup(restore_isfile)

        class Module(object):
            paths = []
            load_tests_args = []

            def __init__(self, path):
                self.path = path
                self.paths.append(path)
                if os.path.basename(path) == 'test_directory':
                    def load_tests(loader, tests, pattern):
                        self.load_tests_args.append((loader, tests, pattern))
                        return 'load_tests'
                    self.load_tests = load_tests

            def __eq__(self, other):
                return self.path == other.path

        loader._get_module_from_name = lambda name: Module(name)
        def loadTestsFromModule(module, use_load_tests):
            if use_load_tests:
                raise self.failureException('use_load_tests should be False for packages')
            return module.path + ' module tests'
        loader.loadTestsFromModule = loadTestsFromModule

        loader._top_level_dir = '/foo'
        # this time no '.py' on the pattern so that it can match
        # a test package
        suite = list(loader._find_tests('/foo', 'test*'))

        # We should have loaded tests from the test_directory package by calling load_tests
        # and directly from the test_directory2 package
        self.assertEqual(suite,
                         ['load_tests', 'test_directory2' + ' module tests'])
        self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])

        # load_tests should have been called once with loader, tests and pattern
        self.assertEqual(Module.load_tests_args,
                         [(loader, 'test_directory' + ' module tests', 'test*')])

    def test_discover(self):
        # discover() sets _top_level_dir, inserts it on sys.path and
        # delegates to _find_tests
        loader = unittest.TestLoader()

        original_isfile = os.path.isfile
        original_isdir = os.path.isdir
        def restore_isfile():
            os.path.isfile = original_isfile

        os.path.isfile = lambda path: False
        self.addCleanup(restore_isfile)

        orig_sys_path = sys.path[:]
        def restore_path():
            sys.path[:] = orig_sys_path
        self.addCleanup(restore_path)

        full_path = os.path.abspath(os.path.normpath('/foo'))
        with self.assertRaises(ImportError):
            loader.discover('/foo/bar', top_level_dir='/foo')

        self.assertEqual(loader._top_level_dir, full_path)
        self.assertIn(full_path, sys.path)

        os.path.isfile = lambda path: True
        os.path.isdir = lambda path: True

        def restore_isdir():
            os.path.isdir = original_isdir
        self.addCleanup(restore_isdir)

        _find_tests_args = []
        def _find_tests(start_dir, pattern):
            _find_tests_args.append((start_dir, pattern))
            return ['tests']
        loader._find_tests = _find_tests
        loader.suiteClass = str

        suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')

        top_level_dir = os.path.abspath('/foo/bar')
        start_dir = os.path.abspath('/foo/bar/baz')
        self.assertEqual(suite, "['tests']")
        self.assertEqual(loader._top_level_dir, top_level_dir)
        self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
        self.assertIn(top_level_dir, sys.path)

    def test_discover_with_modules_that_fail_to_import(self):
        # an unimportable module becomes a synthetic failing test case
        loader = unittest.TestLoader()

        listdir = os.listdir
        os.listdir = lambda _: ['test_this_does_not_exist.py']
        isfile = os.path.isfile
        os.path.isfile = lambda _: True
        orig_sys_path = sys.path[:]
        def restore():
            os.path.isfile = isfile
            os.listdir = listdir
            sys.path[:] = orig_sys_path
        self.addCleanup(restore)

        suite = loader.discover('.')
        self.assertIn(os.getcwd(), sys.path)
        self.assertEqual(suite.countTestCases(), 1)
        test = list(list(suite)[0])[0] # extract test from suite

        with self.assertRaises(ImportError):
            test.test_this_does_not_exist()

    def test_command_line_handling_parseArgs(self):
        # 'discover' as first CLI argument routes into _do_discovery
        program = TestableTestProgram()

        args = []
        def do_discovery(argv):
            args.extend(argv)
        program._do_discovery = do_discovery
        program.parseArgs(['something', 'discover'])
        self.assertEqual(args, [])

        program.parseArgs(['something', 'discover', 'foo', 'bar'])
        self.assertEqual(args, ['foo', 'bar'])

    def test_command_line_handling_discover_by_default(self):
        # no module and no 'discover' keyword still triggers discovery
        program = TestableTestProgram()
        program.module = None

        self.called = False
        def do_discovery(argv):
            self.called = True
            self.assertEqual(argv, [])
        program._do_discovery = do_discovery
        program.parseArgs(['something'])
        self.assertTrue(self.called)

    def test_command_line_handling_discover_by_default_with_options(self):
        # option flags are forwarded untouched to _do_discovery
        program = TestableTestProgram()
        program.module = None

        args = ['something', '-v', '-b', '-v', '-c', '-f']
        self.called = False
        def do_discovery(argv):
            self.called = True
            self.assertEqual(argv, args[1:])
        program._do_discovery = do_discovery
        program.parseArgs(args)
        self.assertTrue(self.called)

    def test_command_line_handling_do_discovery_too_many_arguments(self):
        # more than three positional args is a usage error
        class Stop(Exception):
            pass
        def usageExit():
            raise Stop

        program = TestableTestProgram()
        program.usageExit = usageExit

        with self.assertRaises(Stop):
            # too many args
            program._do_discovery(['one', 'two', 'three', 'four'])

    def test_command_line_handling_do_discovery_calls_loader(self):
        # each CLI shape maps to a specific Loader.discover invocation
        program = TestableTestProgram()

        class Loader(object):
            args = []
            def discover(self, start_dir, pattern, top_level_dir):
                self.args.append((start_dir, pattern, top_level_dir))
                return 'tests'

        program._do_discovery(['-v'], Loader=Loader)
        self.assertEqual(program.verbosity, 2)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['--verbose'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery([], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish', 'eggs'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-s', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-t', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-p', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'fish', None)])
        self.assertFalse(program.failfast)
        self.assertFalse(program.catchbreak)

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
                              Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])
        self.assertEqual(program.verbosity, 2)
        self.assertTrue(program.failfast)
        self.assertTrue(program.catchbreak)

    def test_detect_module_clash(self):
        # discovery must refuse to shadow an already-imported module with
        # a same-named file from a different directory
        class Module(object):
            __file__ = 'bar/foo.py'
        sys.modules['foo'] = Module
        full_path = os.path.abspath('foo')
        original_listdir = os.listdir
        original_isfile = os.path.isfile
        original_isdir = os.path.isdir

        def cleanup():
            os.listdir = original_listdir
            os.path.isfile = original_isfile
            os.path.isdir = original_isdir
            del sys.modules['foo']
            if full_path in sys.path:
                sys.path.remove(full_path)
        self.addCleanup(cleanup)

        def listdir(_):
            return ['foo.py']
        def isfile(_):
            return True
        def isdir(_):
            return True
        os.listdir = listdir
        os.path.isfile = isfile
        os.path.isdir = isdir

        loader = unittest.TestLoader()

        mod_dir = os.path.abspath('bar')
        expected_dir = os.path.abspath('foo')
        msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
                "Is this module globally installed?" % (mod_dir, expected_dir))
        self.assertRaisesRegex(
            ImportError, '^%s$' % msg, loader.discover,
            start_dir='foo', pattern='foo.py'
        )
        self.assertEqual(sys.path[0], full_path)

    def test_discovery_from_dotted_path(self):
        # a dotted package name resolves to the package's directory
        loader = unittest.TestLoader()

        tests = [self]
        expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))

        self.wasRun = False
        def _find_tests(start_dir, pattern):
            self.wasRun = True
            self.assertEqual(start_dir, expectedPath)
            return tests
        loader._find_tests = _find_tests
        suite = loader.discover('unittest.test')
        self.assertTrue(self.wasRun)
        self.assertEqual(suite._tests, tests)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
iwegner/MITK | Modules/Biophotonics/python/iMC/scripts/ipcai_to_theano/input_icai_data.py | 6 | 3612 | """
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
from __future__ import print_function
import os
import numpy
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
from regression.preprocessing import preprocess
__docformat__ = 'restructedtext en'
def create_dataset(path_to_simulation_results):
    """Load one simulation-results file and return preprocessed (X, y).

    The file is a CSV with a two-level column header; preprocessing adds
    noise at an SNR of 10 (see regression.preprocessing.preprocess).
    """
    simulation_frame = pd.read_csv(path_to_simulation_results, header=[0, 1])
    features, targets = preprocess(simulation_frame, snr=10.)
    return features, targets.values
def load_data(data_root):
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)

    Returns a list of three (x, y) pairs of Theano shared variables:
    [(train), (valid), (test)].  The test set is read from a local
    "sample_image.npy" file with a dummy label.
    '''
    # NOTE(review): file names are hard-coded to the IPCAI spectrocam data set
    TRAIN_IMAGES = os.path.join(data_root,
                                "ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt")
    TEST_IMAGES = os.path.join(data_root,
                               "ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt")

    train_set = create_dataset(TRAIN_IMAGES)
    valid_set = create_dataset(TEST_IMAGES)
    test_set = (np.load("sample_image.npy"), np.array([0]))

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets ous get around this issue
        return shared_x, shared_y

    # test set is created without borrowing (borrow=0)
    test_set_x, test_set_y = shared_dataset(test_set, 0)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
| bsd-3-clause |
SOKP/external_chromium_org | chrome/test/ispy/server/main_view_handler.py | 88 | 4303 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Request handler to serve the main_view page."""
import jinja2
import json
import os
import re
import sys
import webapp2
import ispy_api
from common import constants
from common import ispy_utils
import gs_bucket
import views
# Jinja2 environment rooted at the views package; autoescape extension is
# enabled so template output is HTML-escaped by default.
JINJA = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(views.__file__)),
    extensions=['jinja2.ext.autoescape'])
class MainViewHandler(webapp2.RequestHandler):
  """Request handler to serve the main_view page."""

  def get(self):
    """Handles a get request to the main_view page.

    If the test_run parameter is specified, then a page displaying all of
    the failed runs in the test_run will be shown. Otherwise a view listing
    all of the test_runs available for viewing will be displayed.
    """
    test_run = self.request.get('test_run')
    bucket = gs_bucket.GoogleCloudStorageBucket(constants.BUCKET)
    ispy = ispy_utils.ISpyUtils(bucket)
    # Load the view.
    if test_run:
      self._GetForTestRun(test_run, ispy)
      return
    self._GetAllTestRuns(ispy)

  def _GetAllTestRuns(self, ispy):
    """Renders a list view of all of the test_runs available in GS.

    Args:
      ispy: An instance of ispy_api.ISpyApi.
    """
    template = JINJA.get_template('list_view.html')
    data = {}
    max_keys = 1000
    # GCS listing is paged; 'marker' is the key to resume listing after.
    marker = 'failures/%s' % self.request.get('marker')
    test_runs = list([path.split('/')[1] for path in
                      ispy.GetAllPaths('failures/', max_keys=max_keys,
                                       marker=marker, delimiter='/')])
    base_url = '/?test_run=%s'
    next_url = '/?marker=%s' % test_runs[-1]
    data['next_url'] = next_url
    data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]
    self.response.write(template.render(data))

  def _GetForTestRun(self, test_run, ispy):
    """Renders a sorted list of failure-rows for a given test_run.

    This method will produce a list of failure-rows that are sorted
    in descending order by number of different pixels.

    Args:
      test_run: The name of the test_run to render failure rows from.
      ispy: An instance of ispy_api.ISpyApi.
    """
    paths = set([path for path in ispy.GetAllPaths('failures/' + test_run)
                 if path.endswith('actual.png')])
    can_rebaseline = ispy_api.ISpyApi(
        ispy.cloud_bucket).CanRebaselineToTestRun(test_run)
    rows = [self._CreateRow(test_run, path, ispy) for path in paths]
    # Function that sorts by the different_pixels field in the failure-info.
    # NOTE(review): cmp-style comparator passed positionally to sorted();
    # this is Python 2 only syntax.
    def _Sorter(a, b):
      return cmp(b['percent_different'],
                 a['percent_different'])
    template = JINJA.get_template('main_view.html')
    self.response.write(
        template.render({'comparisons': sorted(rows, _Sorter),
                         'test_run': test_run,
                         'can_rebaseline': can_rebaseline}))

  def _CreateRow(self, test_run, path, ispy):
    """Creates one failure-row.

    This method builds a dictionary with the data necessary to display a
    failure in the main_view html template.

    Args:
      test_run: The name of the test_run the failure is in.
      path: A path to the failure's actual.png file.
      ispy: An instance of ispy_api.ISpyApi.

    Returns:
      A dictionary with fields necessary to render a failure-row
      in the main_view html template.
    """
    res = {}
    # Path layout is failures/<test_run>/<expectation>/actual.png
    res['expectation'] = path.lstrip('/').split('/')[2]
    res['test_run'] = test_run
    res['info'] = json.loads(ispy.cloud_bucket.DownloadFile(
        ispy_utils.GetFailurePath(res['test_run'], res['expectation'],
                                  'info.txt')))
    expected = ispy_utils.GetExpectationPath(
        res['expectation'], 'expected.png')
    diff = ispy_utils.GetFailurePath(test_run, res['expectation'], 'diff.png')
    res['percent_different'] = res['info']['fraction_different'] * 100
    res['expected_path'] = expected
    res['diff_path'] = diff
    res['actual_path'] = path
    res['expected'] = ispy.cloud_bucket.GetImageURL(expected)
    res['diff'] = ispy.cloud_bucket.GetImageURL(diff)
    res['actual'] = ispy.cloud_bucket.GetImageURL(path)
    return res
| bsd-3-clause |
pkleimert/hrpt | apps/survey/spec.py | 4 | 17104 | import simplejson as json
# Public API of the survey-spec DSL: containers, branch constructs and
# boolean operators (If/ElseIf/Else and the operator classes are defined
# later in this module).
__all__ = ['Survey', 'Question', 'Profile', 'Response',
           'Advise',
           'If', 'ElseIf', 'Else',
           'Empty', 'Equal', 'EqualIndex', 'In',
           'And', 'Or', 'Not']
class Question(object):
    """Base class for a declarative survey question.

    Subclasses set ``question`` (the text) and ``type``; ``id`` defaults
    to the subclass name.  ``parent``/``prev`` link the question into a
    chain of If/ElseIf/Else branch nodes (defined elsewhere in this
    module) that control its visibility.
    """
    question = None
    type = None
    private = False
    blank = False

    def __init__(self):
        # Fall back to the subclass name as the question id.
        if not hasattr(self, 'id'):
            self.id = self.__class__.__name__
        if self.question is None:
            raise SpecSyntaxError('Question for Question %s is not specified' % self.id)
        if self.type is None:
            raise SpecSyntaxError('Type for Question %s is not specified' % self.id)

    # set by the containing branch when the spec tree is built
    parent = None

    def visible(self, values):
        """Return True if this question should be shown given ``values``.

        Walks up the branch chain: every enclosing If/ElseIf condition
        must be True and every preceding sibling branch condition must
        be False.  NOTE(review): relies on branch nodes exposing
        ``condition``, ``prev`` and ``parent``.
        """
        parent = self.parent
        while True:
            # top level questions
            if parent is None:
                return True
            # under Else
            elif isinstance(parent, Else):
                # print '>>> Else: prev:', parent.prev
                pass
            # the direct If/ElseIf parent should evaluate to True
            elif not parent.condition.eval(values):
                return False
            # the adjacent If/ElseIf shoudl evaluate to False
            while parent.prev is not None:
                parent = parent.prev
                if parent.condition.eval(values):
                    return False
            # arrives at the first If, then go to its parent
            parent = parent.parent

    def get_condition(self):
        """Build a single Boolean expression equivalent to ``visible``.

        NOTE(review): TrueValue is defined elsewhere in this module;
        presumably an always-true Boolean used as the neutral element.
        """
        parent = self.parent
        condition = TrueValue()
        while parent is not None:
            if not isinstance(parent, Else):
                condition = condition & parent.condition
            while parent.prev is not None:
                parent = parent.prev
                condition = condition & ~parent.condition
            parent = parent.parent
        return condition

    def get_usage(self, name):
        """Collect, without duplicates, the ids of the given usage kind
        ('question' / 'profile' / 'response') referenced by all branch
        conditions that guard this question."""
        # print '> get usage:', name
        items = []
        parent = self.parent
        while parent is not None:
            if not isinstance(parent, Else):
                items += parent.get_usage(name)
            while parent.prev is not None:
                parent = parent.prev
                items += parent.get_usage(name)
            parent = parent.parent
        return list(set(items))

    def get_modifier(self):
        # other questions whose answers influence this question's visibility
        return self.get_usage('question')

    def get_profiles(self):
        return self.get_usage('profile')

    def get_responses(self):
        return self.get_usage('response')

    def __str__(self):
        return '<Question [%s]: %s>' % (self.id, self.question)
class Advise(Question):
    """A display-only message in the survey; reuses the Question
    machinery with the message text stored in ``question``."""
    type = 'advise'
    message = None

    def __init__(self):
        if not hasattr(self, 'id'):
            self.id = self.__class__.__name__
        if self.message is None:
            raise SpecSyntaxError('Message for Advise %s is not specified' % self.id)
        # must be set before Question.__init__ validates ``question``
        self.question = self.message
        super(Advise, self).__init__()
class Survey(object):
    """Declarative container for a survey definition.

    Class attributes:
      id      -- unique survey identifier
      rules   -- Question classes and Branch objects making up the survey
      prefill -- default answers, keyed by question id
    """
    id = None
    rules = None
    prefill = {}

    def __str__(self):
        return '<Survey [{0}]>'.format(self.id)
class Profile(object):
    """Reference to a stored profile answer, identified by question id."""

    def __init__(self, id):
        """id is question id"""
        self.id = id

    def __str__(self):
        return '<Profile [{0}]>'.format(self.id)
class Response(object):
    """Reference to a previously recorded response, identified by
    question id."""

    def __init__(self, id):
        """id is question id"""
        self.id = id

    def __str__(self):
        return '<Response [{0}]>'.format(self.id)
##################
class SpecSyntaxError(Exception):
    # Raised when a survey spec is declared incorrectly
    # (e.g. a Question without text or type, an Advise without message).
    pass
class Boolean(object):
    """Base node of a boolean expression tree over survey answers.

    Subclasses implement ``eval``; the operator overloads let specs be
    written as ``a & b``, ``a | b`` and ``~a`` (building And/Or/Not
    nodes, defined elsewhere in this module).
    """

    def eval(self, values):
        # Subclasses must evaluate themselves against the answer dict.
        raise NotImplementedError()

    def __and__(self, rhs):
        return And(self, rhs)

    def __or__(self, rhs):
        return Or(self, rhs)

    def __invert__(self):
        return Not(self)

    def get_usage(self, name):
        # Leaf default: references nothing.
        return []
class Evaluator(Boolean):
    # Marker base class for evaluator nodes; adds no behavior of its own.
    pass
class QuestionClass(object):
    """Late-binding reference to a Question subclass by its class name.

    The class name is mapped to a concrete question id at evaluation
    time rather than when the spec is declared.
    """

    def __init__(self, name):
        """Store the referenced Question class name."""
        self.name = name
#
# Value evaluator
#
class Value(object):
    """Base class for objects that extract a value from the `values` mapping."""

    def value(self, values):
        """Return the referenced value; subclasses must override."""
        raise NotImplementedError()

    def get_usage(self, name):
        """Ids of `name`-typed values this node reads (none by default)."""
        return []
class QuestionValue(Value):
    """Value of a question's answer, referenced by id (str), by a
    QuestionClass wrapper, or by a Question/Advise subclass.

    An Advise reference always evaluates to True (it carries no answer).
    """

    def __init__(self, name):
        """:param name: question id, QuestionClass, or Question/Advise subclass.

        :raises SpecSyntaxError: if `name` is none of the supported kinds.
        """
        import inspect
        self.type = None
        self.name = name
        if type(name) == str:
            self.type = 'id'
        elif type(name) == QuestionClass:
            self.name = name.name
            self.type = 'cls'
        # inspect.isclass() guards issubclass(), which raises TypeError for
        # non-class arguments; malformed input now fails uniformly with
        # SpecSyntaxError instead of leaking a TypeError.
        elif inspect.isclass(name) and issubclass(name, Advise):
            self.name = name.__name__
            self.type = 'advise'
        elif inspect.isclass(name) and issubclass(name, Question):
            self.name = name.__name__
            self.type = 'cls'
        else:
            raise SpecSyntaxError()

    def value(self, values):
        if self.type == 'id':
            return values[self.name]
        elif self.type == 'advise':
            # An advise has no answer; treat it as always "set".
            return True
        elif self.type == 'cls':
            # Resolve the class name to a concrete question id first.
            id = values['+id'][self.name]
            return values[id]

    def __str__(self):
        return '<Question [%s]>' % self.name

    @property
    def js(self):
        # JavaScript counterpart used by the client-side evaluator.
        return 'd.Question(%s)' % json.dumps(self.name)

    def get_usage(self, name):
        if name == 'question':
            return [self.name]
        return []
class ProfileValue(Value):
    """Extracts a profile value (stored under the '+p' key) by question id."""

    def __init__(self, id):
        self.id = id

    def value(self, values):
        profiles = values['+p']
        return profiles[self.id]

    def __str__(self):
        return '<Profile [%s]>' % self.id

    @property
    def js(self):
        # JavaScript counterpart used by the client-side evaluator.
        return 'd.Profile(%s)' % json.dumps(self.id)

    def get_usage(self, name):
        if name == 'profile':
            return [self.id]
        return []
class ResponseValue(Value):
    """Extracts a response value (stored under the '+r' key) by question id."""

    def __init__(self, id):
        self.id = id

    def value(self, values):
        responses = values['+r']
        return responses[self.id]

    def __str__(self):
        return '<Response [%s]>' % self.id

    @property
    def js(self):
        # JavaScript counterpart used by the client-side evaluator.
        return 'd.Response(%s)' % json.dumps(self.id)

    def get_usage(self, name):
        if name == 'response':
            return [self.id]
        return []
class Primitive(Value):
    """Wraps a constant (str/int/float or a collection of them)."""

    def __init__(self, value):
        self.val = value

    def value(self, values):
        # Constants ignore the `values` mapping.
        return self.val

    def __str__(self):
        return '<Primitive [%s]>' % json.dumps(self.val)

    @property
    def js(self):
        # BUG FIX: the membership list read `[list, type, set]` — `type` (the
        # builtin) can never equal type(self.val) for a plain value and was
        # clearly a typo for `tuple` (every sibling check in this module uses
        # [list, set, tuple]). The slip was masked for tuples because
        # json.dumps serializes tuples as JSON arrays anyway.
        if type(self.val) in [list, tuple, set]:
            return json.dumps(list(self.val))
        else:
            return json.dumps(self.val)
class Empty(Evaluator):
    """True when the referenced value is missing (None) or an empty collection."""

    def __init__(self, a):
        """`a` is a Profile object, Response object, Question class or question id."""
        if isinstance(a, Profile):
            self.a = ProfileValue(a.id)
        elif isinstance(a, Response):
            self.a = ResponseValue(a.id)
        else:
            self.a = QuestionValue(a)

    def eval(self, values):
        val = self.a.value(values)
        if type(val) in [list, set, tuple]:
            return len(val) == 0
        return val is None

    def __str__(self):
        return '<Empty [%s]>' % (self.a,)

    @property
    def js(self):
        return 'd.Empty(%s)' % self.a.js

    def get_usage(self, name):
        return self.a.get_usage(name)
class Equal(Evaluator):
    """Check if the two values are equal."""

    def __init__(self, a, b):
        """a is Profile object, Response object, Question class or question id
        b is Question class or primitives

        :raises SpecSyntaxError: if `b` is not a supported kind.
        """
        import inspect
        if isinstance(a, Profile):
            self.a = ProfileValue(a.id)
        elif isinstance(a, Response):
            self.a = ResponseValue(a.id)
        else:
            self.a = QuestionValue(a)
        if type(b) in [str, int, float]:
            self.b = Primitive(b)
        elif isinstance(b, Profile):
            self.b = ProfileValue(b.id)
        elif isinstance(b, Response):
            self.b = ResponseValue(b.id)
        # inspect.isclass() guards issubclass(): previously a non-class,
        # non-primitive `b` (e.g. None) leaked a TypeError out of
        # issubclass() instead of reporting a spec error.
        elif inspect.isclass(b) and issubclass(b, Question):
            self.b = QuestionValue(b)
        else:
            raise SpecSyntaxError()

    def eval(self, values):
        # The string fallback papers over str-vs-int answers from the client.
        return self.a.value(values) == self.b.value(values) or \
            str(self.a.value(values)) == str(self.b.value(values))  # FIXME XXX

    def __str__(self):
        return '<Equal [%s] [%s]>' % (self.a, self.b)

    @property
    def js(self):
        return 'd.Equal(%s, %s)' % (self.a.js, self.b.js)

    def get_usage(self, name):
        return self.a.get_usage(name) + self.b.get_usage(name)

E = Equal
class EqualIndex(Equal):
    """Equality against one indexed element of the first value."""

    def __init__(self, a, index, b):
        super(EqualIndex, self).__init__(a, b)
        self.index = index

    def eval(self, values):
        # Compare the index-th element of `a` (coerced to int) against `b`.
        lhs = int(self.a.value(values)[self.index])
        rhs = self.b.value(values)
        return lhs == rhs

    @property
    def js(self):
        return 'd.EqualIndex(%s, %s, %s)' % (self.a.js, self.index, self.b.js)
class In(Evaluator):
    """Check if the first value is one of the second values."""

    def __init__(self, a, b):
        """a is Profile object, Response object, Question class or question id
        b is list/set/tuple of primitives"""
        if isinstance(a, Profile):
            self.a = ProfileValue(a.id)
        elif isinstance(a, Response):
            self.a = ResponseValue(a.id)
        else:
            self.a = QuestionValue(a)
        if not type(b) in [tuple, list, set]:
            raise SpecSyntaxError()
        if not all([type(val) in [str, int, float] for val in b]):
            raise SpecSyntaxError()
        self.b = Primitive(list(b))

    def eval(self, values):
        # Force the value of the first argument into a list
        a = self.a.value(values)
        if not type(a) in [list, set, tuple]:
            a = [a]
        # Fallback result on the raw values; kept if int coercion fails below.
        res = any([val in a for val in self.b.value(values)])
        # FIXME XXX the value might be a string instead of integer.
        # BUG FIX: this used `a = map(int, a)`. Under Python 3 that is a lazy
        # iterator which is exhausted by the first membership test, making all
        # subsequent `val in a` checks silently False. An eager list
        # comprehension restores the Python 2 semantics.
        try:
            b = self.b.value(values)
            a = [int(item) for item in a]
            res = any([val in a for val in b])
        except ValueError:
            pass
        return res

    def __str__(self):
        return '<In [%s] [%s]>' % (self.a, self.b)

    @property
    def js(self):
        return 'd.In(%s, %s)' % (self.a.js, self.b.js)

    def get_usage(self, name):
        return self.a.get_usage(name) + self.b.get_usage(name)
class Compare(Evaluator):
    """AND of several question-class comparisons given as keyword arguments."""

    def __init__(self, **comparisons):
        """key is Question class name
        value is Question class or primitives"""
        import inspect
        self.comparisons = []
        for key, val in comparisons.items():
            key = QuestionValue(QuestionClass(key))
            if type(val) in [str, int, float]:
                val = Primitive(val)
            # inspect.isclass() guards issubclass() against non-class values,
            # turning bad input into SpecSyntaxError instead of TypeError.
            elif inspect.isclass(val) and issubclass(val, Question):
                val = QuestionValue(val)
            else:
                raise SpecSyntaxError()
            self.comparisons.append(Equal(key, val))

    def eval(self, values):
        # BUG FIX: this read `comparison.eval(value)` — `value` is an
        # undefined name, so every evaluation raised NameError.
        return all([comparison.eval(values) for comparison in self.comparisons])

    def __str__(self):
        return '<Compare [%s]>' % ' '.join([str(comparison) for comparison in self.comparisons])

    @property
    def js(self):
        if len(self.comparisons) == 1:
            return self.comparisons[0].js
        return '(%s)' % (') && ('.join([comparison.js for comparison in self.comparisons]))

    def get_usage(self, name):
        # `reduce` was a Python-2-only builtin here and raised TypeError on an
        # empty comparison list; plain concatenation is equivalent and total.
        items = []
        for comparison in self.comparisons:
            items += comparison.get_usage(name)
        return items
#
# Consts
#
class TrueValue(Boolean):
    """Constant condition that always evaluates to True."""

    def eval(self, values):
        return True

    def __str__(self):
        return '<TrueValue>'

    @property
    def js(self):
        return 'd.BooleanTrue()'
#
# Operators
#
class And(Boolean):
    """AND operator over one or more Boolean conditions."""

    def __init__(self, a, *conditions):
        conditions = [a] + list(conditions)
        if not all([isinstance(condition, Boolean) for condition in conditions]):
            raise SpecSyntaxError()
        # At least one condition is guaranteed by the signature.
        self.conditions = conditions

    def eval(self, values):
        return all([condition.eval(values) for condition in self.conditions])

    def __str__(self):
        return '<And [%s]>' % ('] ['.join([str(condition) for condition in self.conditions]))

    @property
    def js(self):
        if len(self.conditions) == 1:
            return self.conditions[0].js
        return 'd.And(%s)' % (', '.join([condition.js for condition in self.conditions]))

    def get_usage(self, name):
        # `reduce` was a Python-2-only builtin; explicit concatenation is
        # equivalent (self.conditions is never empty) and Python-3 safe.
        items = []
        for condition in self.conditions:
            items += condition.get_usage(name)
        return items
class Or(Boolean):
    """OR operator over one or more Boolean conditions."""

    def __init__(self, a, *conditions):
        conditions = [a] + list(conditions)
        if not all([isinstance(condition, Boolean) for condition in conditions]):
            raise SpecSyntaxError()
        # At least one condition is guaranteed by the signature.
        self.conditions = conditions

    def eval(self, values):
        return any([condition.eval(values) for condition in self.conditions])

    def __str__(self):
        return '<Or [%s]>' % ('] ['.join([str(condition) for condition in self.conditions]))

    @property
    def js(self):
        if len(self.conditions) == 1:
            return self.conditions[0].js
        return 'd.Or(%s)' % (', '.join([condition.js for condition in self.conditions]))

    def get_usage(self, name):
        # `reduce` was a Python-2-only builtin; explicit concatenation is
        # equivalent (self.conditions is never empty) and Python-3 safe.
        items = []
        for condition in self.conditions:
            items += condition.get_usage(name)
        return items
class Not(Boolean):
    """NOT operator: inverts a single Boolean condition."""

    def __init__(self, condition):
        if not isinstance(condition, Boolean):
            raise SpecSyntaxError()
        self.condition = condition

    def eval(self, values):
        result = self.condition.eval(values)
        return not result

    def __str__(self):
        return '<Not [%s]>' % self.condition

    @property
    def js(self):
        return 'd.Not(%s)' % self.condition.js

    def get_usage(self, name):
        return self.condition.get_usage(name)
#
# Branching
#
class Branch(object):
    """Base class for conditional branching nodes in a rule list.

    `prev` links an ElseIf/Else back to its preceding branch;
    `parent` is the enclosing branch (both wired up by validate_rules).
    """
    prev = None
    parent = None

    def get_usage(self, name):
        # A bare branch references no values; BranchIf/BranchElseIf override.
        return []
class BranchIf(Branch):
    """An `If` branch: a condition plus the rules it guards."""

    def __init__(self, condition, rules):
        self.condition = condition
        # Validates and links nested rules, with this branch as parent.
        self.rules = validate_rules(rules, self)

    def get_usage(self, name):
        return self.condition.get_usage(name)
class BranchElseIf(Branch):
    """An `ElseIf` branch: taken when its condition holds and all
    preceding branches in the `prev` chain did not."""

    def __init__(self, condition, rules):
        self.condition = condition
        # Validates and links nested rules, with this branch as parent.
        self.rules = validate_rules(rules, self)

    def get_usage(self, name):
        return self.condition.get_usage(name)
class Conditional(object):
    """Marker base for the If/ElseIf/Else branch generators used in specs."""
    pass
class If(Conditional):
    """`If(cond, ...)(rule, ...)` builds a BranchIf with ANDed conditions."""

    def __init__(self, condition, *conditions):
        all_conds = [condition] + list(conditions)
        if not all([isinstance(cond, Boolean) for cond in all_conds]):
            raise SpecSyntaxError()
        self.conditions = And(*all_conds)

    def __call__(self, *rules):
        # A branch with no rules is a spec error.
        if not rules:
            raise SpecSyntaxError()
        return BranchIf(self.conditions, rules)
class ElseIf(Conditional):
    """`ElseIf(cond, ...)(rule, ...)` builds a BranchElseIf with ANDed conditions."""

    def __init__(self, condition, *conditions):
        all_conds = [condition] + list(conditions)
        if not all([isinstance(cond, Boolean) for cond in all_conds]):
            raise SpecSyntaxError()
        self.conditions = And(*all_conds)

    def __call__(self, *rules):
        # A branch with no rules is a spec error.
        if not rules:
            raise SpecSyntaxError()
        return BranchElseIf(self.conditions, rules)
class Else(Conditional, Branch):
    """Fallback branch: unlike If/ElseIf it is both the generator and the
    branch itself (no separate Branch* class, no condition)."""

    def __init__(self, *rules):
        if len(rules) == 0:
            raise SpecSyntaxError()
        # Validates and links nested rules, with this branch as parent.
        self.rules = validate_rules(rules, self)
#
# Rule validator
#
def validate_rules(rules, parent=None):
    """Validate a rule sequence and wire up branch links.

    Ensures each ElseIf/Else follows an If chain, sets `prev` links between
    branches and assigns `parent` to every rule.

    :param rules: iterable of Branch *objects* and Question *classes*.
    :param parent: enclosing Branch (or None at the top level).
    :returns: the rules as a list.
    :raises SpecSyntaxError: for a dangling ElseIf/Else or an unknown rule.
    """
    import inspect
    res = []
    prev = None
    for rule in rules:
        if issubclass(rule.__class__, Branch):
            # Branch object: maintain the If -> ElseIf -> Else chain.
            klass = rule.__class__
            if klass == BranchIf:
                rule.prev = None
                prev = rule
            elif klass == BranchElseIf:
                if prev is None:
                    raise SpecSyntaxError()
                rule.prev = prev
                prev = rule
            elif klass == Else:
                if prev is None:
                    raise SpecSyntaxError()
                rule.prev = prev
                prev = None
            res.append(rule)
        # inspect.isclass() guards issubclass(): a non-class rule (e.g. a
        # string) now raises SpecSyntaxError instead of leaking a TypeError.
        elif inspect.isclass(rule) and issubclass(rule, Question):
            # Question class.
            # NOTE(review): the original had a dead store `last = None` here —
            # it looks like `prev = None` was intended (which would forbid an
            # ElseIf after an intervening question). Original behavior is
            # preserved; confirm the intent before tightening.
            res.append(rule)
        else:
            raise SpecSyntaxError()
        # NOTE: for Question rules this sets a *class* attribute.
        rule.parent = parent
    return res
| agpl-3.0 |
michellemorales/OpenMM | models/compression/entropy_coder/all_models/all_models_test.py | 14 | 2447 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic test of all registered models."""
import tensorflow as tf
# pylint: disable=unused-import
import all_models
# pylint: enable=unused-import
from entropy_coder.model import model_factory
class AllModelsTest(tf.test.TestCase):
  """Smoke test: every registered entropy-coder model builds a train graph."""

  def testBuildModelForTraining(self):
    factory = model_factory.GetModelRegistry()
    model_names = factory.GetAvailableModels()

    for m in model_names:
      # Each model gets a fresh graph so variables/names cannot collide.
      tf.reset_default_graph()

      global_step = tf.Variable(tf.zeros([], dtype=tf.int64),
                                trainable=False,
                                name='global_step')
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

      # Arbitrary small input shape: (batch, height, width, depth).
      batch_size = 3
      height = 40
      width = 20
      depth = 5
      binary_codes = tf.placeholder(dtype=tf.float32,
                                    shape=[batch_size, height, width, depth])

      # Create a model with the default configuration.
      print('Creating model: {}'.format(m))
      model = factory.CreateModel(m)
      model.Initialize(global_step,
                       optimizer,
                       model.GetConfigStringForUnitTest())
      # Before BuildGraph, none of the graph-derived attributes may exist.
      self.assertTrue(model.loss is None, 'model: {}'.format(m))
      self.assertTrue(model.train_op is None, 'model: {}'.format(m))
      self.assertTrue(model.average_code_length is None, 'model: {}'.format(m))

      # Build the Tensorflow graph corresponding to the model.
      model.BuildGraph(binary_codes)
      self.assertTrue(model.loss is not None, 'model: {}'.format(m))
      self.assertTrue(model.average_code_length is not None,
                      'model: {}'.format(m))
      # train_op is optional: some registered models are inference-only.
      if model.train_op is None:
        print('Model {} is not trainable'.format(m))
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  tf.test.main()
| gpl-2.0 |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/rnn/python/ops/lstm_ops.py | 11 | 24027 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
  r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, and there's an
  optional peephole connection.

  This kernel op implements the following mathematical equations:

  ```python
  xh = [x, h_prev]
  [i, ci, f, o] = xh * w + b
  f = f + forget_bias

  if not use_peephole:
    wci = wcf = wco = 0

  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)

  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)

  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
      The input to the LSTM cell, shape (batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the cell state at previous time step.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Output of the previous cell at previous time step.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `-1` (no clipping).
      Value to clip the 'cs' value to. Disable by setting to negative value.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
    i: A `Tensor`. Has the same type as `x`. The input gate.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
    f: A `Tensor`. Has the same type as `x`. The forget gate.
    o: A `Tensor`. Has the same type as `x`. The output gate.
    ci: A `Tensor`. Has the same type as `x`. The cell input.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
    h: A `Tensor`. Has the same type as `x`. The output h vector.

  Raises:
    ValueError: If cell_size is None.
  """
  if wci is None:
    cell_size = cs_prev.get_shape().with_rank(2)[1].value
    if cell_size is None:
      raise ValueError("cell_size from `cs_prev` should not be None.")
    # No peephole weights supplied: zero vectors make the kernel's
    # peephole terms no-ops.
    wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wcf = wci
    wco = wci

  # pylint: disable=protected-access
  return gen_lstm_ops.lstm_block_cell(
      x=x,
      cs_prev=cs_prev,
      h_prev=h_prev,
      w=w,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=b,
      forget_bias=forget_bias,
      cell_clip=cell_clip if cell_clip is not None else -1,
      use_peephole=use_peephole,
      name=name)
  # pylint: enable=protected-access
def _block_lstm(seq_len_max,
                x,
                w,
                b,
                cs_prev=None,
                h_prev=None,
                wci=None,
                wcf=None,
                wco=None,
                forget_bias=None,
                cell_clip=None,
                use_peephole=None,
                name=None):
  r"""Runs the fused BlockLSTM forward pass over a whole time-major sequence.

  TODO(williamchan): add doc.

  Args:
    seq_len_max: A `Tensor` of type `int64`.
    x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
    w: A `Tensor`. Must have the same type as `x`.
    b: A `Tensor`. Must have the same type as `x`.
    cs_prev: A `Tensor`. Must have the same type as `x`.
    h_prev: A `Tensor`. Must have the same type as `x`.
    wci: A `Tensor`. Must have the same type as `x`.
    wcf: A `Tensor`. Must have the same type as `x`.
    wco: A `Tensor`. Must have the same type as `x`.
    forget_bias: An optional `float`. Defaults to `1`.
    cell_clip: An optional `float`. Defaults to `-1` (no clipping).
    use_peephole: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
    i: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    f: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    o: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    co: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    h: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.

  Raises:
    ValueError: If `b` does not have a valid shape.
  """
  batch_size = x[0].get_shape().with_rank(2)[0].value
  cell_size4 = b.get_shape().with_rank(1)[0].value
  if cell_size4 is None:
    raise ValueError("`b` shape must not be None.")
  # The bias packs the 4 gates (i, ci, f, o) contiguously.
  # NOTE(review): `/` is true division under Python 3, producing a float
  # cell_size — `//` looks intended; confirm before changing.
  cell_size = cell_size4 / 4

  zero_state = None
  if cs_prev is None or h_prev is None:
    zero_state = array_ops.constant(
        0, dtype=dtypes.float32, shape=[batch_size, cell_size])
  if cs_prev is None:
    cs_prev = zero_state
  if h_prev is None:
    h_prev = zero_state
  if wci is None:
    # No peephole weights supplied: zero vectors disable the peephole terms.
    wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wcf = wci
    wco = wci

  # pylint: disable=protected-access
  i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
      seq_len_max=seq_len_max,
      x=array_ops.stack(x),
      cs_prev=cs_prev,
      h_prev=h_prev,
      w=w,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=b,
      forget_bias=forget_bias,
      cell_clip=cell_clip if cell_clip is not None else -1,
      name=name,
      use_peephole=use_peephole)

  # Unstack each fused time-major output back into per-timestep lists.
  return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
      f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
          co), array_ops.unstack(h)
  # pylint: enable=protected-access
# pylint: enable=invalid-name
# Names of the two dense gradient outputs of LSTMBlockCellGrad.
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]


@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
  """Gradient for LSTMBlockCell."""
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
  (i, cs, f, o, ci, co, _) = op.outputs
  # Only the cell-state and h gradients flow in; the gate outputs are
  # intermediate values with no incoming gradient.
  (_, cs_grad, _, _, _, _, h_grad) = grad

  batch_size = x.get_shape().with_rank(2)[0].value
  if batch_size is None:
    batch_size = -1
  input_size = x.get_shape().with_rank(2)[1].value
  if input_size is None:
    raise ValueError("input_size from `x` should not be None.")
  cell_size = cs_prev.get_shape().with_rank(2)[1].value
  if cell_size is None:
    raise ValueError("cell_size from `cs_prev` should not be None.")

  # The fused grad kernel produces the gradient w.r.t. the previous cell
  # state, the stacked gate pre-activations (dicfo) and the peephole weights.
  (cs_prev_grad, dicfo, wci_grad, wcf_grad,
   wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wcf,
       wco,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))

  # Backprop from dicfo to xh.
  xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)

  # Split the concatenated [x, h_prev] gradient back into its two parts.
  x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
  x_grad.get_shape().merge_with(x.get_shape())

  h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
                                (batch_size, cell_size))
  h_prev_grad.get_shape().merge_with(h_prev.get_shape())

  # Backprop from dicfo to w.
  xh = array_ops.concat([x, h_prev], 1)
  w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
  w_grad.get_shape().merge_with(w.get_shape())

  # Backprop from dicfo to b.
  b_grad = nn_ops.bias_add_grad(dicfo)
  b_grad.get_shape().merge_with(b.get_shape())

  return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
          wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
  """Gradient for BlockLSTM."""
  seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
  i, cs, f, o, ci, co, h = op.outputs

  # Outputs are (i, cs, f, o, ci, co, h); only cs (index 1) and h (index 6)
  # receive incoming gradients.
  cs_grad = grad[1]
  h_grad = grad[6]

  (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
   b_grad) = gen_lstm_ops.block_lstm_grad(
       seq_len_max,
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wcf,
       wco,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       h,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))

  # seq_len_max is an integer input with no gradient.
  return [
      None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
      wco_grad, b_grad
  ]
class LSTMBlockCell(rnn_cell_impl.RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add `forget_bias` (default: 1) to the biases of the forget gate in order
  to reduce the scale of forgetting in the beginning of the training.

  Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
  faster. The weight and bias matrices should be compatible as long as the
  variable scope matches.
  """

  def __init__(self,
               num_units,
               forget_bias=1.0,
               cell_clip=None,
               use_peephole=False,
               reuse=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: An optional `float`. Defaults to `-1` (no clipping).
      use_peephole: Whether to use peephole connections or not.
      reuse: (optional) boolean describing whether to reuse variables in an
        existing scope. If not `True`, and the existing scope already has the
        given variables, an error is raised.

      When restoring from CudnnLSTM-trained checkpoints, must use
      CudnnCompatibleLSTMBlockCell instead.
    """
    super(LSTMBlockCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._use_peephole = use_peephole
    # The kernel interprets a negative cell_clip as "no clipping".
    self._cell_clip = cell_clip if cell_clip is not None else -1
    # Variable names kept compatible with rnn_cell_impl.LSTMCell checkpoints.
    self._names = {
        "W": "kernel",
        "b": "bias",
        "wci": "w_i_diag",
        "wcf": "w_f_diag",
        "wco": "w_o_diag",
        "scope": "lstm_cell"
    }

  @property
  def state_size(self):
    return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, x, states_prev, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or self._names["scope"]):
      x_shape = x.get_shape().with_rank(2)
      if not x_shape[1].value:
        raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
      if len(states_prev) != 2:
        raise ValueError("Expecting states_prev to be a tuple with length 2.")
      input_size = x_shape[1].value
      # Single fused weight matrix for all four gates: [x; h] -> 4 * units.
      w = vs.get_variable(self._names["W"], [input_size + self._num_units,
                                             self._num_units * 4])
      b = vs.get_variable(
          self._names["b"], [w.get_shape().with_rank(2)[1].value],
          initializer=init_ops.constant_initializer(0.0))
      if self._use_peephole:
        wci = vs.get_variable(self._names["wci"], [self._num_units])
        wcf = vs.get_variable(self._names["wcf"], [self._num_units])
        wco = vs.get_variable(self._names["wco"], [self._num_units])
      else:
        # Zero peephole weights make the kernel's peephole terms no-ops.
        wci = wcf = wco = array_ops.zeros([self._num_units])
      (cs_prev, h_prev) = states_prev
      (_, cs, _, _, _, _, h) = _lstm_block_cell(
          x,
          cs_prev,
          h_prev,
          w,
          b,
          wci=wci,
          wcf=wcf,
          wco=wco,
          forget_bias=self._forget_bias,
          cell_clip=self._cell_clip,
          use_peephole=self._use_peephole)

      new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
      return h, new_state
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
  """This is a helper class that provides housekeeping for LSTM cells.

  This may be useful for alternative LSTM and similar type of cells.
  The subclasses must implement `_call_cell` method and `num_units` property.
  """

  @abc.abstractproperty
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    pass

  @abc.abstractmethod
  def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
                 sequence_length):
    """Run this LSTM on inputs, starting from the given state.

    This method must be implemented by subclasses and does the actual work
    of calling the cell.

    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An int32
        or int64 vector (tensor) size [batch_size], values in [0, time_len) or
        None.

    Returns:
      A pair containing:

      - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
    """
    pass

  def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    """Run this LSTM on inputs, starting from the given state.

    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
        or a list of `time_len` tensors of shape `[batch_size, input_size]`.
      initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
        of shape `[batch_size, self._num_units]`. If this is not provided, the
        cell is expected to create a zero initial state of type `dtype`.
      dtype: The data type for the initial state and expected output. Required
        if `initial_state` is not provided or RNN state has a heterogeneous
        dtype.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len).`
        Defaults to `time_len` for each element.
      scope: `VariableScope` for the created subgraph; defaults to class name.

    Returns:
      A pair containing:

      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
        or a list of time_len tensors of shape `[batch_size, output_size]`,
        to match the type of the `inputs`.
      - Final state: a tuple `(cell_state, output)` matching `initial_state`.

    Raises:
      ValueError: in case of shape mismatches
    """
    with vs.variable_scope(scope or "lstm_block_wrapper"):
      is_list = isinstance(inputs, list)
      if is_list:
        # Normalize list inputs to one time-major tensor.
        inputs = array_ops.stack(inputs)
      inputs_shape = inputs.get_shape().with_rank(3)
      if not inputs_shape[2]:
        raise ValueError("Expecting inputs_shape[2] to be set: %s" %
                         inputs_shape)
      batch_size = inputs_shape[1].value
      if batch_size is None:
        batch_size = array_ops.shape(inputs)[1]
      time_len = inputs_shape[0].value
      if time_len is None:
        time_len = array_ops.shape(inputs)[0]

      # Provide default values for initial_state and dtype
      if initial_state is None:
        if dtype is None:
          raise ValueError(
              "Either initial_state or dtype needs to be specified")
        z = array_ops.zeros(
            array_ops.stack([batch_size, self.num_units]), dtype=dtype)
        initial_state = z, z
      else:
        if len(initial_state) != 2:
          raise ValueError(
              "Expecting initial_state to be a tuple with length 2 or None")
        if dtype is None:
          dtype = initial_state[0].dtype

      # create the actual cell
      if sequence_length is not None:
        sequence_length = ops.convert_to_tensor(sequence_length)
      initial_cell_state, initial_output = initial_state  # pylint: disable=unpacking-non-sequence
      cell_states, outputs = self._call_cell(inputs, initial_cell_state,
                                             initial_output, dtype,
                                             sequence_length)

      if sequence_length is not None:
        # Mask out the part beyond sequence_length
        mask = array_ops.transpose(
            array_ops.sequence_mask(
                sequence_length, time_len, dtype=dtype), [1, 0])
        mask = array_ops.tile(
            array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
        outputs *= mask
        # Prepend initial states to cell_states and outputs for indexing to work
        # correctly,since we want to access the last valid state at
        # sequence_length - 1, which can even be -1, corresponding to the
        # initial state.
        mod_cell_states = array_ops.concat(
            [array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
        mod_outputs = array_ops.concat(
            [array_ops.expand_dims(initial_output, [0]), outputs], 0)
        final_cell_state = self._gather_states(mod_cell_states, sequence_length,
                                               batch_size)
        final_output = self._gather_states(mod_outputs, sequence_length,
                                           batch_size)
      else:
        # No sequence_lengths used: final state is the last state
        final_cell_state = cell_states[-1]
        final_output = outputs[-1]

      if is_list:
        # Input was a list, so return a list
        outputs = array_ops.unstack(outputs)

      final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
      return outputs, final_state

  def _gather_states(self, data, indices, batch_size):
    """Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
    # Flatten (time, batch) into one axis so a single gather can pick the
    # per-example state at its own sequence length.
    mod_indices = indices * batch_size + math_ops.range(batch_size)
    return array_ops.gather(
        array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
  """FusedRNNCell implementation of LSTM.

  This is an extremely efficient LSTM implementation, that uses a single TF op
  for the entire LSTM. It should be both faster and more memory-efficient than
  LSTMBlockCell defined above.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
  """

  def __init__(self,
               num_units,
               forget_bias=1.0,
               cell_clip=None,
               use_peephole=False):
    """Initialize the LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: clip the cell to this value. Default is no cell clipping.
      use_peephole: Whether to use peephole connections or not.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    # The kernel interprets a negative cell_clip as "no clipping".
    self._cell_clip = cell_clip if cell_clip is not None else -1
    self._use_peephole = use_peephole

  @property
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    return self._num_units

  def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
                 sequence_length):
    """Run this LSTM on inputs, starting from the given state.

    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)` or None.

    Returns:
      A pair containing:

      - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
        output_size]`
      - Output (h): A `3-D` tensor of shape `[time_len, batch_size,
        output_size]`
    """
    inputs_shape = inputs.get_shape().with_rank(3)
    time_len = inputs_shape[0].value
    if time_len is None:
      time_len = array_ops.shape(inputs)[0]
    input_size = inputs_shape[2].value
    # Single fused weight matrix for all four gates: [x; h] -> 4 * units.
    w = vs.get_variable(
        "kernel",
        [input_size + self._num_units, self._num_units * 4], dtype=dtype)
    b = vs.get_variable(
        "bias", [w.get_shape().with_rank(2)[1]],
        initializer=init_ops.constant_initializer(0.0),
        dtype=dtype)
    if self._use_peephole:
      wci = vs.get_variable("w_i_diag", [self._num_units], dtype=dtype)
      wcf = vs.get_variable("w_f_diag", [self._num_units], dtype=dtype)
      wco = vs.get_variable("w_o_diag", [self._num_units], dtype=dtype)
    else:
      # Zero peephole weights make the kernel's peephole terms no-ops.
      wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)

    if sequence_length is None:
      max_seq_len = math_ops.to_int64(time_len)
    else:
      max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))

    _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
        seq_len_max=max_seq_len,
        x=inputs,
        cs_prev=initial_cell_state,
        h_prev=initial_output,
        w=w,
        wci=wci,
        wcf=wcf,
        wco=wco,
        b=b,
        forget_bias=self._forget_bias,
        cell_clip=self._cell_clip,
        use_peephole=self._use_peephole)
    return cs, h
| apache-2.0 |
sarutobi/ritmserdtsa | rynda/message/migrations/0001_initial.py | 3 | 4623 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the message app: Category, Message and MessageNotes.
    # This is a historical migration; field definitions must not be edited
    # after it has been applied anywhere, or migration state will diverge.

    dependencies = [
        ('geozones', '0001_initial'),
        # User model is swappable, so depend on whatever AUTH_USER_MODEL is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200, verbose_name='name', db_column=b'name')),
                ('description', models.TextField(verbose_name='description', db_column=b'description', blank=True)),
                ('slug', models.SlugField(max_length=255, verbose_name='slug', db_column=b'slug', blank=True)),
                # Manual ordering weight; used by Meta.ordering below.
                ('order', models.SmallIntegerField(db_column=b'order')),
            ],
            options={
                'ordering': ['order'],
                'verbose_name': 'category',
                'verbose_name_plural': 'categories',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200, verbose_name='Title', blank=True)),
                ('message', models.TextField(verbose_name='Message')),
                ('additional_info', jsonfield.fields.JSONField(default=b'', verbose_name='Additional info', blank=True)),
                ('messageType', models.IntegerField(verbose_name='Message type', db_column=b'message_type', choices=[(1, 'Request message'), (2, 'Offer message'), (3, 'Informatial message')])),
                ('source', models.CharField(max_length=255, verbose_name='source', blank=True)),
                ('is_virtual', models.BooleanField(default=False, verbose_name='Is virtual')),
                ('is_active', models.BooleanField(default=False, verbose_name='active')),
                ('is_important', models.BooleanField(default=False, verbose_name='important')),
                ('is_anonymous', models.BooleanField(default=True, verbose_name='hide contacts')),
                ('is_removed', models.BooleanField(default=False, verbose_name='removed')),
                ('allow_feedback', models.BooleanField(default=True, verbose_name='allow feedback')),
                ('status', models.SmallIntegerField(default=1, null=True, verbose_name='status', blank=True, choices=[(1, 'New'), (2, 'Unverified'), (3, 'Verified'), (4, 'Pending'), (6, 'Closed')])),
                ('date_add', models.DateTimeField(auto_now_add=True, db_column=b'date_add')),
                ('last_edit', models.DateTimeField(auto_now=True, db_column=b'date_modify')),
                ('expired_date', models.DateTimeField(null=True, verbose_name='expired at', blank=True)),
                ('edit_key', models.CharField(max_length=40, blank=True)),
                # NOTE(review): IPAddressField is deprecated in later Django
                # releases (GenericIPAddressField replaces it); fine to keep in
                # this historical migration, but new migrations should not use it.
                ('sender_ip', models.IPAddressField(verbose_name='sender IP', null=True, editable=False, blank=True)),
                ('category', models.ManyToManyField(to='message.Category', null=True, verbose_name='message categories', blank=True)),
                ('linked_location', models.ForeignKey(blank=True, to='geozones.Location', null=True)),
                ('user', models.ForeignKey(db_column=b'user_id', editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'ordering': ['-date_add'],
                'get_latest_by': 'date_add',
                'verbose_name': 'Message',
                'verbose_name_plural': 'Messages',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MessageNotes',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('note', models.TextField(verbose_name='Note')),
                ('date_add', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('last_edit', models.DateTimeField(auto_now=True, verbose_name='Last edit')),
                ('message', models.ForeignKey(to='message.Message')),
                ('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='Author')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| mit |
prune998/ansible | lib/ansible/modules/clustering/pacemaker_cluster.py | 27 | 7664 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacemaker_cluster
short_description: Manage a pacemaker cluster
version_added: "2.3"
author: "Mathieu Bultel (matbu)"
description:
- This module can manage a pacemaker cluster and nodes from Ansible using
the pacemaker cli.
options:
state:
description:
- Indicate desired state of the cluster
choices: ['online', 'offline', 'restart', 'cleanup']
required: true
node:
description:
- Specify which node of the cluster you want to manage. None == the
cluster status itself, 'all' == check the status of all nodes.
required: false
default: None
timeout:
description:
      - Timeout, in seconds, after which the module should consider that the action has failed
required: false
default: 300
force:
description:
- Force the change of the cluster state
required: false
default: true
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
---
- name: Set cluster Online
hosts: localhost
gather_facts: no
tasks:
- name: get cluster state
pacemaker_cluster: state=online
'''
RETURN = '''
change:
description: True if the cluster state has changed
type: bool
out:
description: The output of the current state of the cluster. It return a
list of the nodes state.
type: string
sample: 'out: [[" overcloud-controller-0", " Online"]]}'
rc:
description: exit code of the module
    type: int
'''
import time
from distutils.version import StrictVersion
_PCS_CLUSTER_DOWN="Error: cluster is not currently running on this node"
def get_cluster_status(module):
cmd = "pcs cluster status"
rc, out, err = module.run_command(cmd)
if out in _PCS_CLUSTER_DOWN:
return 'offline'
else:
return 'online'
def get_node_status(module, node='all'):
    """Return the pcsd status of one node or of every cluster node.

    Args:
        module: AnsibleModule instance used to execute the ``pcs`` command.
        node: Node name to query, or ``'all'`` to query every node.

    Returns:
        list: One ``[name, state]`` pair per line of ``pcs cluster
        pcsd-status`` output (values keep their surrounding whitespace).
    """
    # Bug fix: the original branches were swapped -- it appended the literal
    # string "all" to the command and ignored a specifically requested node.
    # Querying all nodes is the no-argument form; a single node is passed
    # explicitly.
    if node == 'all':
        cmd = "pcs cluster pcsd-status"
    else:
        cmd = "pcs cluster pcsd-status %s" % node
    rc, out, err = module.run_command(cmd)
    # `rc == 1` instead of `rc is 1`: identity comparison on ints only works
    # by accident of CPython's small-int caching.
    if rc == 1:
        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
    status = []
    for o in out.splitlines():
        status.append(o.split(':'))
    return status
def clean_cluster(module, timeout):
    """Run ``pcs resource cleanup`` to clear resource failure history.

    Args:
        module: AnsibleModule instance used to execute the ``pcs`` command.
        timeout: Accepted for interface symmetry with the other actions;
            currently unused because cleanup is a one-shot command.
    """
    cmd = "pcs resource cleanup"
    rc, out, err = module.run_command(cmd)
    # `rc == 1` instead of `rc is 1`: equality, not identity, is the correct
    # comparison for exit codes.
    if rc == 1:
        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
def set_cluster(module, state, timeout, force):
    """Start or stop the whole cluster and wait until it reaches `state`.

    Args:
        module: AnsibleModule instance used to execute the ``pcs`` command.
        state: Desired state, ``'online'`` or ``'offline'``.
        timeout: Seconds to wait for the cluster to reach the desired state.
        force: When True, append ``--force`` to the pcs command.
    """
    if state == 'online':
        cmd = "pcs cluster start"
    elif state == 'offline':
        cmd = "pcs cluster stop"
    else:
        # Bug fix: previously any other state left `cmd` unbound and the
        # function crashed with a NameError; fail explicitly instead.
        module.fail_json(msg="Unsupported cluster state `%s`" % state)
        return
    if force:
        cmd = "%s --force" % cmd
    rc, out, err = module.run_command(cmd)
    # `rc == 1` instead of `rc is 1` (identity comparison on ints).
    if rc == 1:
        module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))

    t = time.time()
    ready = False
    while time.time() < t + timeout:
        cluster_state = get_cluster_status(module)
        if cluster_state == state:
            ready = True
            break
        # Avoid busy-looping `pcs cluster status` at full speed while waiting.
        time.sleep(1)
    if not ready:
        module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
def set_node(module, state, timeout, force, node='all'):
    """Bring individual cluster nodes to `state` and wait for convergence.

    Args:
        module: AnsibleModule instance used to execute the ``pcs`` command.
        state: Desired node state, ``'online'`` or ``'offline'``.
        timeout: Seconds to wait for all nodes to reach the desired state.
        force: When True, append ``--force`` to the pcs command.
        node: Node to manage, or ``'all'`` for every node.
    """
    # map states
    if state == 'online':
        base_cmd = "pcs cluster start"
    elif state == 'offline':
        base_cmd = "pcs cluster stop"
    else:
        # Bug fix: an unsupported state previously left the command unbound.
        module.fail_json(msg="Unsupported node state `%s`" % state)
        return
    if force:
        base_cmd = "%s --force" % base_cmd

    nodes_state = get_node_status(module, node)
    for node_state in nodes_state:
        if node_state[1].strip().lower() != state:
            # Bug fix: the original reused and extended a single `cmd` string,
            # so the second node's command also re-ran the first node's name
            # ("pcs cluster start node1 node2", ...).  Build each node's
            # command from the unmodified base instead.
            cmd = "%s %s" % (base_cmd, node_state[0].strip())
            rc, out, err = module.run_command(cmd)
            # `rc == 1` instead of `rc is 1` (identity comparison on ints).
            if rc == 1:
                module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))

    t = time.time()
    ready = False
    while time.time() < t + timeout:
        nodes_state = get_node_status(module)
        # Bug fix: the original marked success as soon as *any* node matched
        # and then kept polling until the timeout anyway.  Require every node
        # to match and stop polling as soon as that happens.
        if all(ns[1].strip().lower() == state for ns in nodes_state):
            ready = True
            break
        time.sleep(1)  # avoid a busy-wait between polls
    if not ready:
        module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
def main():
    # Ansible entry point: parse parameters and dispatch to the cluster/node
    # state helpers above.
    argument_spec = dict(
        state = dict(choices=['online', 'offline', 'restart', 'cleanup']),
        node = dict(default=None),
        timeout=dict(default=300, type='int'),
        force=dict(default=True, type='bool'),
    )

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           )
    changed = False
    state = module.params['state']
    node = module.params['node']
    force = module.params['force']
    timeout = module.params['timeout']

    if state in ['online', 'offline']:
        # Get cluster status
        if node is None:
            # Whole-cluster management: only act (and report changed=True)
            # when the cluster is not already in the requested state.
            cluster_state = get_cluster_status(module)
            if cluster_state == state:
                module.exit_json(changed=changed,
                                 out=cluster_state)
            else:
                set_cluster(module, state, timeout, force)
                cluster_state = get_cluster_status(module)
                if cluster_state == state:
                    module.exit_json(changed=True,
                                     out=cluster_state)
                else:
                    module.fail_json(msg="Fail to bring the cluster %s" % state)
        else:
            cluster_state = get_node_status(module, node)
            # Check cluster state
            # NOTE(review): this exits on the *first* node entry only -- both
            # branches of the if/else call exit_json/fail_json, so later
            # entries are never inspected.  Also note set_cluster (not
            # set_node) is called here; confirm that is intentional.
            for node_state in cluster_state:
                if node_state[1].strip().lower() == state:
                    module.exit_json(changed=changed,
                                     out=cluster_state)
                else:
                    # Set cluster status if needed
                    set_cluster(module, state, timeout, force)
                    cluster_state = get_node_status(module, node)
                    module.exit_json(changed=True,
                                     out=cluster_state)

    if state in ['restart']:
        # Restart = stop then start, failing loudly if either phase stalls.
        set_cluster(module, 'offline', timeout, force)
        cluster_state = get_cluster_status(module)
        if cluster_state == 'offline':
            set_cluster(module, 'online', timeout, force)
            cluster_state = get_cluster_status(module)
            if cluster_state == 'online':
                module.exit_json(changed=True,
                                 out=cluster_state)
            else:
                module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
        else:
            module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")

    if state in ['cleanup']:
        # NOTE(review): this passes state='cleanup' to set_cluster (which only
        # knows 'online'/'offline') and references cluster_state before any
        # assignment on this path -- both look like latent bugs; confirm the
        # intent was clean_cluster(module, timeout).
        set_cluster(module, state, timeout, force)
        module.exit_json(changed=True,
                         out=cluster_state)
from ansible.module_utils.basic import AnsibleModule
# Standard Ansible module entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
F5Networks/f5-icontrol-rest-python | icontrol/session.py | 1 | 28048 | # Copyright 2019 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A BigIP-RESTServer URI handler. REST-APIs use it on the :mod:`requests`
library.
Use this module to make calls to a BigIP-REST server. It will handle:
#. URI Sanitization uri's produced by this module are checked to ensure
compliance with the BigIP-REST server interface
#. Session Construction -- the :class:`iControlRESTSession` wraps a
:class:`requests.Session` object.
#. Logging -- pre- and post- request state is logged.
#. Exception generation -- Errors in URL construction generate
:class:`BigIPInvalidURL` subclasses; unexpected HTTP status codes raise
:class:`iControlUnexpectedHTTPError`.
The core functionality of the module is implemented via the
:class:`iControlRESTSession` class. Calls to its' HTTP-methods are checked,
pre-logged, submitted, and post-logged.
There are 2 modes of operation "full_uri", and "uri_as_parts", toggled by the
`uri_as_parts` boolean keyword param that can be passed to methods. It defaults
to `False`. Use `uri_as_parts` when you want to leverage the full
functionality of this library, and have it construct your uri for you.
Example Use in `uri_as_parts` mode:
>>> iCRS = iControlRESTSession('jrandomhacker', 'insecure')
>>> iCRS.get('https://192.168.1.1/mgmt/tm/ltm/nat/', \
partition='Common', name='VALIDNAME', uri_as_parts=True)
In `full_uri` mode:
>>> iCRS.get('https://192.168.1.1/mgmt/tm/ltm/nat/~Common~VALIDNAME')
NOTE: If used via the :mod:`f5-common-python` library the typical mode is
"full_uri" since that library binds uris to Python objects.
Available functions:
- iCRS.{get, post, put, delete, patch}: requests.Session.VERB wrappers
- decorate_HTTP_verb_method: this function preps, logs, and handles requests
against the BigIP REST Server, by pre- and post- processing the above methods.
"""
from icontrol import __version__ as version
from icontrol.authtoken import iControlRESTTokenAuth
from icontrol.exceptions import iControlUnexpectedHTTPError
from icontrol.exceptions import InvalidBigIP_ICRURI
from icontrol.exceptions import InvalidInstanceNameOrFolder
from icontrol.exceptions import InvalidPrefixCollection
from icontrol.exceptions import InvalidScheme
from icontrol.exceptions import InvalidSuffixCollection
from icontrol.exceptions import InvalidURIComponentPart
from six import iteritems
import functools
import logging
import requests
import urllib3
try:
import json
except ImportError:
import simplejson as json
try:
# Python 3
from urllib.parse import urlsplit
except ImportError:
# Python 2
from urlparse import urlsplit
# Accepted spellings for boolean-ish values; used by the `debug` property
# setter to validate assigned values.
BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))
BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False))
BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE)
def _validate_icruri(base_uri):
    """Check that `base_uri` is an https URI rooted at a REST management path.

    The URI must use the https scheme and its path must begin with one of the
    known management prefixes (BIG-IP's /mgmt/tm/, or the /mgmt/cm/, /mgmt/ap/
    and /mgmt/shared/ prefixes seen on BIG-IQ/iWorkflow).  Whatever follows
    the prefix is validated as a prefix-collection path.
    """
    scheme, netloc, path, _, _ = urlsplit(base_uri)
    if scheme != 'https':
        raise InvalidScheme(scheme)
    known_prefixes = ('/mgmt/tm/', '/mgmt/cm/', '/mgmt/ap/', '/mgmt/shared/')
    for prefix in known_prefixes:
        if path.startswith(prefix):
            sub_path = path[len(prefix):]
            break
    else:
        error_message = "The path must start with either '/mgmt/tm/'," \
                        "'/mgmt/cm/', or '/mgmt/shared/'! But it's:" \
                        " '%s'" % path
        raise InvalidBigIP_ICRURI(error_message)
    return _validate_prefix_collections(sub_path)
def _validate_prefix_collections(prefix_collections):
# The prefix collections are everything in the URI after /mgmt/tm/ and
# before the 'partition' It must not start with '/' because it's relative
# to the /mgmt/tm REST management path, and it must end with '/' since the
# subequent components expect to be addressed relative to it.
# Additionally the first '/' delimited component of the prefix collection
# must be an "organizing collection". See the REST users guide:
# https://devcentral.f5.com/d/icontrol-rest-user-guide-version-1150
if not prefix_collections.endswith('/'):
error_message =\
"prefix_collections path element must end with '/', but it's: %s"\
% prefix_collections
raise InvalidPrefixCollection(error_message)
return True
def _validate_name_partition_subpath(element):
# '/' and '~' are illegal characters in most cases, however there are
# few exceptions (GTM Regions endpoint being one of them where the
# validation of name should not apply.
if element == '':
return True
if '~' in element:
error_message =\
"instance names and partitions cannot contain '~', but it's: %s"\
% element
raise InvalidInstanceNameOrFolder(error_message)
elif '/' in element:
error_message =\
"instance names and partitions cannot contain '/', but it's: %s"\
% element
raise InvalidInstanceNameOrFolder(error_message)
return True
def _validate_suffix_collections(suffix_collections):
# These collections must start with '/' since they may come after a name
# and/or partition and I do not know whether '~partition~name/' is a legal
# ending for a URI.
# The suffix must not endwith '/' as it is the last component that can
# be appended to the URI path.
if not suffix_collections.startswith('/'):
error_message =\
"suffix_collections path element must start with '/', but" \
" it's: %s" % suffix_collections
raise InvalidSuffixCollection(error_message)
if suffix_collections.endswith('/'):
error_message =\
"suffix_collections path element must not end with '/', but" \
" it's: %s" % suffix_collections
raise InvalidSuffixCollection(error_message)
return True
def _validate_uri_parts(
        base_uri, partition, name, sub_path, suffix_collections,
        **kwargs):
    # Apply the above validators to the correct components.
    _validate_icruri(base_uri)
    _validate_name_partition_subpath(partition)
    # 'transform_name'/'transform_subpath' callers legitimately embed '/' or
    # '~' in these components (they are escaped later), so the character
    # check is skipped for them.
    if not kwargs.get('transform_name', False):
        _validate_name_partition_subpath(name)
    if not kwargs.get('transform_subpath', False):
        _validate_name_partition_subpath(sub_path)
    # Suffix is optional; validate only when present.
    if suffix_collections:
        _validate_suffix_collections(suffix_collections)
    return True
def generate_bigip_uri(base_uri, partition, name, sub_path, suffix, **kwargs):
    '''(str, str, str) --> str

    This function checks the supplied elements to see if each conforms to
    the specification for the appropriate part of the URI. These validations
    are conducted by the helper function _validate_uri_parts.
    After validation the parts are assembled into a valid BigIP REST URI
    string which is then submitted with appropriate metadata.

    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
'CUSTOMER1', 'nat52', params={'a':1})
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
'CUSTOMER1', 'nat52', params={'a':1}, suffix='/wacky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52/wacky'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', '', '', \
params={'a':1}, suffix='/thwocky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/thwocky'

    ::Warning: There are cases where '/' and '~' characters are valid in the
    object name or subPath. This is indicated by passing the 'transform_name'
    or 'transform_subpath' boolean respectively as True. By default this is
    set to False.
    '''
    _validate_uri_parts(base_uri, partition, name, sub_path, suffix,
                        **kwargs)
    # With the transform flags, embedded '/' separators are folded into the
    # BIG-IP '~' folder notation instead of being rejected by validation.
    if kwargs.get('transform_name', False):
        if name != '':
            name = name.replace('/', '~')
    if kwargs.get('transform_subpath', False):
        if sub_path != '':
            sub_path = sub_path.replace('/', '~')
    if partition != '':
        partition = '~' + partition
    else:
        # A subPath is only addressable relative to a partition.
        if sub_path:
            msg = 'When giving the subPath component include partition ' \
                  'as well.'
            raise InvalidURIComponentPart(msg)
    if sub_path != '' and partition != '':
        sub_path = '~' + sub_path
    if name != '' and partition != '':
        name = '~' + name
    tilded_partition_and_instance = partition + sub_path + name
    # Without a ~partition~name segment the suffix attaches directly to the
    # collection path, so its leading '/' must be dropped.
    if suffix and not tilded_partition_and_instance:
        suffix = suffix.lstrip('/')
    REST_uri = base_uri + tilded_partition_and_instance + suffix
    return REST_uri
def decorate_HTTP_verb_method(method):
    """Prepare and Post-Process HTTP VERB method for BigIP-RESTServer request.

    This function decorates all of the HTTP VERB methods in the
    iControlRESTSession class. It provides the core logic for this module.
    If necessary it validates and assembles a uri from parts with a call to
    `generate_bigip_uri`.

    Then it:
    1. pre-logs the details of the request
    2. submits the request
    3. logs the response, included expected status codes
    4. raises exceptions for unexpected status codes. (i.e. not doc'd as BigIP
    RESTServer codes.)
    """
    @functools.wraps(method)
    def wrapper(self, RIC_base_uri, **kwargs):
        # URI-building keywords are popped so they are not forwarded to the
        # underlying requests call.
        partition = kwargs.pop('partition', '')
        sub_path = kwargs.pop('subPath', '')
        suffix = kwargs.pop('suffix', '')
        identifier, kwargs = _unique_resource_identifier_from_kwargs(**kwargs)
        uri_as_parts = kwargs.pop('uri_as_parts', False)
        transform_name = kwargs.pop('transform_name', False)
        transform_subpath = kwargs.pop('transform_subpath', False)
        if uri_as_parts:
            # "uri_as_parts" mode: validate and assemble the full URI here.
            REST_uri = generate_bigip_uri(RIC_base_uri, partition, identifier,
                                          sub_path, suffix,
                                          transform_name=transform_name,
                                          transform_subpath=transform_subpath,
                                          **kwargs)
        else:
            # "full_uri" mode: caller supplied a complete URI; use verbatim.
            REST_uri = RIC_base_uri
        pre_message = "%s WITH uri: %s AND suffix: %s AND kwargs: %s" %\
            (method.__name__, REST_uri, suffix, kwargs)
        logger = logging.getLogger(__name__)
        logger.debug(pre_message)
        response = method(self, REST_uri, **kwargs)
        post_message =\
            "RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:"\
            " %s\nText: %r" % (response.status_code,
                               response.headers.get('Content-Type', None),
                               response.headers.get('Content-Encoding', None),
                               response.text)
        logger.debug(post_message)
        # 200-206 are the success codes documented for the REST server;
        # anything else is surfaced as an exception carrying the response.
        if response.status_code not in range(200, 207):
            error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' %\
                            (response.status_code,
                             response.reason,
                             response.url,
                             response.text)
            raise iControlUnexpectedHTTPError(error_message, response=response)
        return response
    return wrapper
def _unique_resource_identifier_from_kwargs(**kwargs):
"""Chooses an identifier given different choices
The unique identifier in BIG-IP's REST API at the time of this writing
is called 'name'. This is in contrast to the unique identifier that is
used by iWorkflow and BIG-IQ which at some times is 'name' and other
times is 'uuid'.
For example, in iWorkflow, there consider this URI
* https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp
Then consider this iWorkflow URI
* https://localhost/mgmt/cm/cloud/connectors/local/{0}
In the first example, the identifier, {0}, is what we would normally
consider a name. For example, "tenant1". In the second example though,
the value is expected to be what we would normally consider to be a
UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.
This method only tries to rectify the problem of which to use.
I believe there might be some change that the two can appear together,
although I have not yet experienced it. If it is possible, I believe it
would happen in BIG-IQ/iWorkflow land where the UUID and Name both have
significance. That's why I deliberately prefer the UUID when it exists
in the parameters sent to the URL.
:param kwargs:
:return:
"""
name = kwargs.pop('name', '')
uuid = kwargs.pop('uuid', '')
id = kwargs.pop('id', '')
if uuid:
return uuid, kwargs
elif id:
# Used for /mgmt/cm/system/authn/providers/tmos on BIG-IP
return id, kwargs
else:
return name, kwargs
class iControlRESTSession(object):
"""Represents a :class:`requests.Session` that communicates with a BigIP.
Instantiate one of these when you want to communicate with a BigIP-REST
Server, it will handle BigIP-specific details of the uri's. In the
f5-common-python library, an :class:`iControlRESTSession` is instantiated
during BigIP instantiation and associated with it as an attribute of the
BigIP (a compositional vs. inheritable association).
Objects instantiated from this class provide an HTTP 1.1 style session, via
the :class:`requests.Session` object, and HTTP-methods that are specialized
to the BigIP-RESTServer interface.
Pass ``token=True`` in ``**kwargs`` to use token-based authentication.
This is required for users that do not have the Administrator role on
BigIP.
"""
def __init__(self, username, password, **kwargs):
"""Instantiation associated with requests.Session via composition.
All transactions are Trust On First Use (TOFU) to the BigIP device,
since no PKI exists for this purpose in general, hence the
"disable_warnings" statement.
Attributes:
username (str): The user to connect with.
password (str): The password of the user.
timeout (int): The timeout, in seconds, to wait before closing
the session.
token (bool|str): True or False, specifying whether to use token
authentication or not.
token_to_use (str): String containing the token itself to use.
This is particularly useful in situations where you want to
mimic the behavior of a browser insofar as storing the token
in a cookie and retrieving it for use "later". This is used
in situations such as automation tools to prevent token
abuse on the BIG-IP. There is a limit that users may not go
beyond when creating tokens and their re-use is an attempt
to mitigate this scenario.
user_agent (str): A string to append to the user agent header
that is sent during a session.
verify (str): The path to a CA bundle containing the CA
certificate for SSL validation
auth_provider: String specifying the specific auth provider to
authenticate the username/password against. If this argument
is specified, the `token` argument is ignored. This keyword
implies that token based authentication is used. The strings
"none" and "default" are reserved words that imply no specific
auth provider is to be used; the system will default to one.
On BIG-IQ systems, the value 'local' can be used to refer to
local user authentication.
"""
# Used for holding debug information
self._debug_output = []
self._debug = False
verify = kwargs.pop('verify', False)
timeout = kwargs.pop('timeout', 30)
proxies = kwargs.pop('proxies', {})
token_auth = kwargs.pop('token', None)
user_agent = kwargs.pop('user_agent', None)
token_to_use = kwargs.pop('token_to_use', None)
auth_provider = kwargs.pop('auth_provider', None)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
try:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except Exception:
pass
# Compose with a Session obj
self.session = requests.Session()
# Configure with passed parameters
self.session.timeout = timeout
# Configure with proxy parameters
self.session.proxies = proxies
# Handle token-based auth.
if token_to_use:
self.session.auth = iControlRESTTokenAuth('admin', 'admin', proxies=proxies)
self.session.auth.token = token_to_use
else:
if auth_provider:
self.session.auth = iControlRESTTokenAuth(
username, password, auth_provider=auth_provider, verify=verify, proxies=proxies
)
else:
if token_auth is True:
self.session.auth = iControlRESTTokenAuth(
username, password, verify=verify, proxies=proxies
)
elif token_auth:
# Truthy but not true: non-default loginAuthProvider
self.session.auth = iControlRESTTokenAuth(
username, password, token_auth, verify=verify, proxies=proxies
)
else:
self.session.auth = (username, password)
# Set state as indicated by ancestral code.
self.session.verify = verify
self.session.headers.update({'Content-Type': 'application/json'})
# Add a user agent for this library and any specified UA
self.append_user_agent('f5-icontrol-rest-python/' + version)
if user_agent:
self.append_user_agent(user_agent)
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, value):
if value in BOOLEANS:
self._debug = value
@property
def debug_output(self):
return self._debug_output
@decorate_HTTP_verb_method
def delete(self, uri, **kwargs):
"""Sends a HTTP DELETE command to the BIGIP REST Server.
Use this method to send a DELETE command to the BIGIP. When calling
this method with the optional arguments ``name`` and ``partition``
as part of ``**kwargs`` they will be added to the ``uri`` passed
in separated by ~ to create a proper BIGIP REST API URL for objects.
All other parameters passed in as ``**kwargs`` are passed directly
to the :meth:`requests.Session.delete`
:param uri: A HTTP URI
:type uri: str
:param name: The object name that will be appended to the uri
:type name: str
:arg partition: The partition name that will be appened to the uri
:type partition: str
:param \**kwargs: The :meth:`reqeusts.Session.delete` optional params
"""
args1 = get_request_args(kwargs)
args2 = get_send_args(kwargs)
req = requests.Request('DELETE', uri, **args1)
prepared = self.session.prepare_request(req)
if self.debug:
self._debug_output.append(debug_prepared_request(prepared))
return self.session.send(prepared, **args2)
@decorate_HTTP_verb_method
def get(self, uri, **kwargs):
"""Sends a HTTP GET command to the BIGIP REST Server.
Use this method to send a GET command to the BIGIP. When calling
this method with the optional arguments ``name`` and ``partition``
as part of ``**kwargs`` they will be added to the ``uri`` passed
in separated by ~ to create a proper BIGIP REST API URL for objects.
All other parameters passed in as ``**kwargs`` are passed directly
to the :meth:`requests.Session.get`
:param uri: A HTTP URI
:type uri: str
:param name: The object name that will be appended to the uri
:type name: str
:arg partition: The partition name that will be appened to the uri
:type partition: str
:param \**kwargs: The :meth:`reqeusts.Session.get` optional params
"""
args1 = get_request_args(kwargs)
args2 = get_send_args(kwargs)
req = requests.Request('GET', uri, **args1)
prepared = self.session.prepare_request(req)
if self.debug:
self._debug_output.append(debug_prepared_request(prepared))
return self.session.send(prepared, **args2)
@decorate_HTTP_verb_method
def patch(self, uri, data=None, **kwargs):
"""Sends a HTTP PATCH command to the BIGIP REST Server.
Use this method to send a PATCH command to the BIGIP. When calling
this method with the optional arguments ``name`` and ``partition``
as part of ``**kwargs`` they will be added to the ``uri`` passed
in separated by ~ to create a proper BIGIP REST API URL for objects.
All other parameters passed in as ``**kwargs`` are passed directly
to the :meth:`requests.Session.patch`
:param uri: A HTTP URI
:type uri: str
:param data: The data to be sent with the PATCH command
:type data: str
:param name: The object name that will be appended to the uri
:type name: str
:arg partition: The partition name that will be appened to the uri
:type partition: str
:param \**kwargs: The :meth:`reqeusts.Session.patch` optional params
"""
args1 = get_request_args(kwargs)
args2 = get_send_args(kwargs)
req = requests.Request('PATCH', uri, data=data, **args1)
prepared = self.session.prepare_request(req)
if self.debug:
self._debug_output.append(debug_prepared_request(prepared))
return self.session.send(prepared, **args2)
@decorate_HTTP_verb_method
def post(self, uri, data=None, json=None, **kwargs):
"""Sends a HTTP POST command to the BIGIP REST Server.
Use this method to send a POST command to the BIGIP. When calling
this method with the optional arguments ``name`` and ``partition``
as part of ``**kwargs`` they will be added to the ``uri`` passed
in separated by ~ to create a proper BIGIP REST API URL for objects.
All other parameters passed in as ``**kwargs`` are passed directly
to the :meth:`requests.Session.post`
:param uri: A HTTP URI
:type uri: str
:param data: The data to be sent with the POST command
:type data: str
:param json: The JSON data to be sent with the POST command
:type json: dict
:param name: The object name that will be appended to the uri
:type name: str
:arg partition: The partition name that will be appened to the uri
:type partition: str
:param \**kwargs: The :meth:`reqeusts.Session.post` optional params
"""
args1 = get_request_args(kwargs)
args2 = get_send_args(kwargs)
req = requests.Request('POST', uri, data=data, json=json, **args1)
prepared = self.session.prepare_request(req)
if self.debug:
self._debug_output.append(debug_prepared_request(prepared))
return self.session.send(prepared, **args2)
@decorate_HTTP_verb_method
def put(self, uri, data=None, **kwargs):
"""Sends a HTTP PUT command to the BIGIP REST Server.
Use this method to send a PUT command to the BIGIP. When calling
this method with the optional arguments ``name`` and ``partition``
as part of ``**kwargs`` they will be added to the ``uri`` passed
in separated by ~ to create a proper BIGIP REST API URL for objects.
All other parameters passed in as ``**kwargs`` are passed directly
to the :meth:`requests.Session.put`
:param uri: A HTTP URI
:type uri: str
:param data: The data to be sent with the PUT command
:type data: str
:param json: The JSON data to be sent with the PUT command
:type json: dict
:param name: The object name that will be appended to the uri
:type name: str
:arg partition: The partition name that will be appended to the uri
:type partition: str
:param **kwargs: The :meth:`reqeusts.Session.put` optional params
"""
args1 = get_request_args(kwargs)
args2 = get_send_args(kwargs)
req = requests.Request('PUT', uri, data=data, **args1)
prepared = self.session.prepare_request(req)
if self.debug:
self._debug_output.append(debug_prepared_request(prepared))
return self.session.send(prepared, **args2)
def append_user_agent(self, user_agent):
    """Extend the session's User-Agent header.

    The given string is appended to the session's current User-Agent
    value, separated by a single space; leading/trailing whitespace of
    the combined value is trimmed.

    :param user_agent: A string to append to the User-Agent header
    :type user_agent: str
    """
    current = self.session.headers.get('User-Agent', '')
    combined = "%s %s" % (current, user_agent)
    self.session.headers['User-Agent'] = combined.strip()
@property
def token(self):
"""Convenience wrapper around returning the current token
Returns:
result (str): The current token being sent in session headers.
"""
# The token lives on the session's auth handler, not on this object.
return self.session.auth.token
@token.setter
def token(self, value):
"""Convenience wrapper around overwriting the current token
Useful in situations where you have an existing iControlRESTSession
object which you want to set a new token on. This token could have
been read from a stored value for example.
"""
# Delegate storage to the auth handler so subsequent requests
# automatically send the new token.
self.session.auth.token = value
def debug_prepared_request(request):
    """Render a prepared HTTP request as an equivalent ``curl`` command.

    Every header is emitted via ``-H``; when any header value is
    ``application/json`` and the request has a body, the body is
    re-serialized with sorted keys and appended via ``-d`` so the output
    is deterministic.
    """
    pieces = ["curl -k -X {0} {1}".format(request.method.upper(), request.url)]
    for name, value in iteritems(request.headers):
        pieces.append(" -H '{0}: {1}'".format(name, value))
    if request.body and any(
            v == 'application/json' for _, v in iteritems(request.headers)):
        payload = json.loads(request.body.decode('utf-8'))
        pieces.append(" -d '" + json.dumps(payload, sort_keys=True) + "'")
    return "".join(pieces)
def get_send_args(kwargs):
    """Extract ``requests.Session.send`` keyword arguments from *kwargs*.

    The recognized keys are removed from *kwargs* (mutating it in place)
    and returned in a new dict; keys that are absent, or present with a
    value of None, are omitted from the result.
    """
    names = ('stream', 'timeout', 'verify', 'cert', 'proxies')
    extracted = dict((name, kwargs.pop(name, None)) for name in names)
    return dict((k, v) for k, v in extracted.items() if v is not None)
def get_request_args(kwargs):
    """Extract ``requests.Request`` keyword arguments from *kwargs*.

    The recognized keys are removed from *kwargs* (mutating it in place)
    and returned in a new dict; keys that are absent, or present with a
    value of None, are omitted from the result.
    """
    names = ('headers', 'files', 'data', 'json', 'params',
             'auth', 'cookies', 'hooks')
    extracted = dict((name, kwargs.pop(name, None)) for name in names)
    return dict((k, v) for k, v in extracted.items() if v is not None)
| apache-2.0 |
zhanqxun/cv_fish | pythonwin/pywin/Demos/openGLDemo.py | 4 | 8939 | # Ported from the win32 and MFC OpenGL Samples.
from pywin.mfc import docview
import sys
try:
    from OpenGL.GL import *
    from OpenGL.GLU import *
except ImportError:
    # Use the function/parenthesized form of print: for a single argument
    # it behaves identically under Python 2 yet keeps this module free of
    # the py2-only print-statement syntax, which is a SyntaxError on
    # Python 3.
    print("The OpenGL extensions do not appear to be installed.")
    print("This Pythonwin demo can not run")
    sys.exit(1)
import win32con
import win32ui
import win32api
import timer
# Pixel-format constants mirrored from the Win32 PIXELFORMATDESCRIPTOR
# API (wingdi.h); they are redefined here rather than imported.
# iPixelType values:
PFD_TYPE_RGBA = 0
PFD_TYPE_COLORINDEX = 1
# iLayerType values:
PFD_MAIN_PLANE = 0
PFD_OVERLAY_PLANE = 1
PFD_UNDERLAY_PLANE = (-1)
# dwFlags bit values:
PFD_DOUBLEBUFFER = 0x00000001
PFD_STEREO = 0x00000002
PFD_DRAW_TO_WINDOW = 0x00000004
PFD_DRAW_TO_BITMAP = 0x00000008
PFD_SUPPORT_GDI = 0x00000010
PFD_SUPPORT_OPENGL = 0x00000020
PFD_GENERIC_FORMAT = 0x00000040
PFD_NEED_PALETTE = 0x00000080
PFD_NEED_SYSTEM_PALETTE = 0x00000100
PFD_SWAP_EXCHANGE = 0x00000200
PFD_SWAP_COPY = 0x00000400
PFD_SWAP_LAYER_BUFFERS = 0x00000800
PFD_GENERIC_ACCELERATED = 0x00001000
PFD_DEPTH_DONTCARE = 0x20000000
PFD_DOUBLEBUFFER_DONTCARE = 0x40000000
PFD_STEREO_DONTCARE = 0x80000000
# Lookup tables widening a 3-, 2-, or 1-bit color component to a full
# 0-255 byte (used when building an RGB palette below).
#threeto8 = [0, 0o111>>1, 0o222>>1, 0o333>>1, 0o444>>1, 0o555>>1, 0o666>>1, 0o377]
threeto8 = [0, 73>>1, 146>>1, 219>>1, 292>>1, 365>>1, 438>>1, 255]
twoto8 = [0, 0x55, 0xaa, 0xff]
oneto8 = [0, 255]
def ComponentFromIndex(i, nbits, shift):
    """Extract an *nbits*-wide color component from palette index *i*.

    The component is taken from bit position *shift*, masked down to
    *nbits* bits, and widened to a full 0-255 byte through the matching
    lookup table (oneto8/twoto8/threeto8). Unsupported bit widths
    yield 0.
    """
    nibble = (i >> shift) & 0xF
    if nbits == 1:
        return oneto8[nibble & 0x1]
    if nbits == 2:
        return twoto8[nibble & 0x3]
    if nbits == 3:
        return threeto8[nibble & 0x7]
    return 0
# Base-class alias: swap in a different MFC view class here to change
# the scrolling/host behavior of every OpenGL view below.
OpenGLViewParent=docview.ScrollView
# Abstract base view that owns the OpenGL rendering context; subclasses
# override Init()/DrawScene() (required) and OnSizeChange()/Term()
# (optional).
class OpenGLView(OpenGLViewParent):
# Adjust the window-class creation parameters before the HWND exists.
def PreCreateWindow(self, cc):
self.HookMessage (self.OnSize, win32con.WM_SIZE)
# An OpenGL window must be created with the following flags and must not
# include CS_PARENTDC for the class style. Refer to SetPixelFormat
# documentation in the "Comments" section for further information.
style = cc[5]
style = style | win32con.WS_CLIPSIBLINGS | win32con.WS_CLIPCHILDREN
cc = cc[0], cc[1], cc[2], cc[3], cc[4], style, cc[6], cc[7], cc[8]
cc = self._obj_.PreCreateWindow(cc)
return cc
# WM_SIZE handler: resync the GL viewport with the client area.
def OnSize (self, params):
lParam = params[3]
cx = win32api.LOWORD(lParam)
cy = win32api.HIWORD(lParam)
glViewport(0, 0, cx, cy)
# Force a repaint when the window shrank, since the stale area
# would otherwise not be redrawn.
if self.oldrect[2] > cx or self.oldrect[3] > cy:
self.RedrawWindow()
self.OnSizeChange(cx, cy)
self.oldrect = self.oldrect[0], self.oldrect[1], cx, cy
def OnInitialUpdate(self):
self.SetScaleToFitSize((100,100)) # or SetScrollSizes() - A Pythonwin requirement
return self._obj_.OnInitialUpdate()
# return rc
# WM_CREATE: remember the initial client rect, build the GL context,
# then let the subclass initialize its scene.
def OnCreate(self, cs):
self.oldrect = self.GetClientRect()
self._InitContexts()
self.Init()
# WM_DESTROY: subclass cleanup first, then release the GL context.
def OnDestroy(self, msg):
self.Term()
self._DestroyContexts()
return OpenGLViewParent.OnDestroy(self, msg)
def OnDraw(self, dc):
self.DrawScene()
# Returning 1 suppresses background erasing; GL repaints every pixel.
def OnEraseBkgnd(self, dc):
return 1
# The OpenGL helpers
# Choose and set a double-buffered RGBA pixel format on the view's DC.
def _SetupPixelFormat(self):
dc = self.dc.GetSafeHdc()
pfd = CreatePIXELFORMATDESCRIPTOR()
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER
pfd.iPixelType = PFD_TYPE_RGBA
pfd.cColorBits = 24
pfd.cDepthBits = 32
pfd.iLayerType = PFD_MAIN_PLANE
pixelformat = ChoosePixelFormat(dc, pfd)
SetPixelFormat(dc, pixelformat, pfd)
self._CreateRGBPalette()
# Build and select a logical palette when the chosen pixel format
# requires one (palette-based displays).
def _CreateRGBPalette(self):
dc = self.dc.GetSafeHdc()
n = GetPixelFormat(dc)
pfd = DescribePixelFormat(dc, n)
if pfd.dwFlags & PFD_NEED_PALETTE:
n = 1 << pfd.cColorBits
pal = []
for i in range(n):
# Decode each palette index into widened R, G, B bytes.
this = ComponentFromIndex(i, pfd.cRedBits, pfd.cRedShift), \
ComponentFromIndex(i, pfd.cGreenBits, pfd.cGreenShift), \
ComponentFromIndex(i, pfd.cBlueBits, pfd.cBlueShift), \
0
pal.append(this)
hpal = win32ui.CreatePalette(pal)
self.dc.SelectPalette(hpal, 0)
self.dc.RealizePalette()
# Create the WGL rendering context and make it current on this view's DC.
def _InitContexts(self):
self.dc = self.GetDC()
self._SetupPixelFormat()
hrc = wglCreateContext(self.dc.GetSafeHdc())
wglMakeCurrent(self.dc.GetSafeHdc(), hrc)
# Detach and delete the current WGL rendering context.
def _DestroyContexts(self):
hrc = wglGetCurrentContext()
wglMakeCurrent(0, 0)
if hrc: wglDeleteContext(hrc)
# The methods to support OpenGL
def DrawScene(self):
assert 0, "You must override this method"
def Init(self):
assert 0, "You must override this method"
# Optional hook: called after the viewport is resized.
def OnSizeChange(self, cx, cy):
pass
# Optional hook: called before the GL context is torn down.
def Term(self):
pass
# Static demo scene: a green cone and a colored pyramid, drawn once per
# paint (no animation).
class TestView(OpenGLView):
# Rebuild the projection matrix for the new aspect ratio and cache the
# camera distance used by DrawScene.
def OnSizeChange(self, right, bottom):
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClearDepth( 1.0 );
glEnable(GL_DEPTH_TEST)
glMatrixMode( GL_PROJECTION )
if bottom:
aspect = right / bottom
else:
aspect = 0 # When window created!
glLoadIdentity()
gluPerspective( 45.0, aspect, 3.0, 7.0 )
glMatrixMode( GL_MODELVIEW )
near_plane = 3.0;
far_plane = 7.0;
maxObjectSize = 3.0;
# Push the scene far enough back that it fits between the planes.
self.radius = near_plane + maxObjectSize/2.0;
def Init(self):
pass
# Clear, draw both shapes translated back by self.radius, then swap.
def DrawScene(self):
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
glPushMatrix()
glTranslatef(0.0, 0.0, -self.radius);
self._DrawCone()
self._DrawPyramid()
glPopMatrix()
glFinish()
SwapBuffers( wglGetCurrentDC() )
# Green cone on the left, built from a GLU cylinder with a 0 top radius.
def _DrawCone(self):
glColor3f(0.0, 1.0, 0.0)
glPushMatrix()
glTranslatef(-1.0, 0.0, 0.0);
quadObj = gluNewQuadric();
gluQuadricDrawStyle(quadObj, GLU_FILL);
gluQuadricNormals(quadObj, GLU_SMOOTH);
gluCylinder(quadObj, 1.0, 0.0, 1.0, 20, 10);
# gluDeleteQuadric(quadObj);
glPopMatrix();
# Color-interpolated pyramid on the right, drawn as a triangle fan
# around the apex.
def _DrawPyramid(self):
glPushMatrix()
glTranslatef(1.0, 0.0, 0.0)
glBegin(GL_TRIANGLE_FAN)
glColor3f(1.0, 0.0, 0.0)
glVertex3f(0.0, 1.0, 0.0)
glColor3f(0.0, 1.0, 0.0)
glVertex3f(-1.0, 0.0, 0.0)
glColor3f(0.0, 0.0, 1.0)
glVertex3f(0.0, 0.0, 1.0)
glColor3f(0.0, 1.0, 0.0)
glVertex3f(1.0, 0.0, 0.0)
glEnd()
glPopMatrix()
# Animated demo scene: a color-interpolated cube spinning on all three
# axes, redrawn from a 150ms timer.
class CubeView(OpenGLView):
# Same projection setup as TestView: rebuild for the new aspect ratio
# and cache the camera distance used by DrawScene.
def OnSizeChange(self, right, bottom):
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClearDepth( 1.0 );
glEnable(GL_DEPTH_TEST)
glMatrixMode( GL_PROJECTION )
if bottom:
aspect = right / bottom
else:
aspect = 0 # When window created!
glLoadIdentity()
gluPerspective( 45.0, aspect, 3.0, 7.0 )
glMatrixMode( GL_MODELVIEW )
near_plane = 3.0;
far_plane = 7.0;
maxObjectSize = 3.0;
self.radius = near_plane + maxObjectSize/2.0;
# Seed the rotation angles and start the animation timer.
def Init(self):
self.busy = 0
self.wAngleY = 10.0
self.wAngleX = 1.0
self.wAngleZ = 5.0
self.timerid = timer.set_timer (150, self.OnTimer)
def OnTimer(self, id, timeVal):
self.DrawScene()
def Term(self):
timer.kill_timer(self.timerid)
def DrawScene(self):
# Re-entrancy guard: skip the frame if a draw is already in progress.
if self.busy: return
self.busy = 1
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPushMatrix();
glTranslatef(0.0, 0.0, -self.radius);
glRotatef(self.wAngleX, 1.0, 0.0, 0.0);
glRotatef(self.wAngleY, 0.0, 1.0, 0.0);
glRotatef(self.wAngleZ, 0.0, 0.0, 1.0);
# Advance the rotation for the next frame (per-axis speeds differ).
self.wAngleX = self.wAngleX + 1.0
self.wAngleY = self.wAngleY + 10.0
self.wAngleZ = self.wAngleZ + 5.0;
# Four side faces of the cube as one quad strip, one color per corner.
glBegin(GL_QUAD_STRIP);
glColor3f(1.0, 0.0, 1.0);
glVertex3f(-0.5, 0.5, 0.5);
glColor3f(1.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, 0.5);
glColor3f(1.0, 1.0, 1.0);
glVertex3f(0.5, 0.5, 0.5);
glColor3f(1.0, 1.0, 0.0);
glVertex3f(0.5, -0.5, 0.5);
glColor3f(0.0, 1.0, 1.0);
glVertex3f(0.5, 0.5, -0.5);
glColor3f(0.0, 1.0, 0.0);
glVertex3f(0.5, -0.5, -0.5);
glColor3f(0.0, 0.0, 1.0);
glVertex3f(-0.5, 0.5, -0.5);
glColor3f(0.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, -0.5);
glColor3f(1.0, 0.0, 1.0);
glVertex3f(-0.5, 0.5, 0.5);
glColor3f(1.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, 0.5);
glEnd();
# Top face.
glBegin(GL_QUADS);
glColor3f(1.0, 0.0, 1.0);
glVertex3f(-0.5, 0.5, 0.5);
glColor3f(1.0, 1.0, 1.0);
glVertex3f(0.5, 0.5, 0.5);
glColor3f(0.0, 1.0, 1.0);
glVertex3f(0.5, 0.5, -0.5);
glColor3f(0.0, 0.0, 1.0);
glVertex3f(-0.5, 0.5, -0.5);
glEnd();
# Bottom face.
glBegin(GL_QUADS);
glColor3f(1.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, 0.5);
glColor3f(1.0, 1.0, 0.0);
glVertex3f(0.5, -0.5, 0.5);
glColor3f(0.0, 1.0, 0.0);
glVertex3f(0.5, -0.5, -0.5);
glColor3f(0.0, 0.0, 0.0);
glVertex3f(-0.5, -0.5, -0.5);
glEnd();
glPopMatrix();
glFinish();
SwapBuffers(wglGetCurrentDC());
self.busy = 0
# Open a new MFC document hosting the animated CubeView (swap in
# TestView via the commented line for the static scene).
def test():
template = docview.DocTemplate(None, None, None, CubeView )
# template = docview.DocTemplate(None, None, None, TestView )
template.OpenDocumentFile(None)
if __name__=='__main__':
test()
| apache-2.0 |
dataxu/ansible-modules-core | cloud/amazon/elasticache_subnet_group.py | 51 | 5267 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage Elasticache subnet groups
description:
- Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
description:
description:
- Elasticache subnet group description. Only set when a new group is added.
required: false
default: null
subnets:
description:
- List of subnet IDs that make up the Elasticache subnet group.
required: false
default: null
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment:
- aws
- ec2
'''
# Example tasks shown in `ansible-doc`. The first example was missing the
# trailing colon after the module name, which makes it invalid task YAML.
EXAMPLES = '''
# Add or change a subnet group
- elasticache_subnet_group:
    state: present
    name: norwegian-blue
    description: My Fancy Ex Parrot Subnet Group
    subnets:
      - subnet-aaaaaaaa
      - subnet-bbbbbbbb

# Remove a subnet group
- elasticache_subnet_group:
    state: absent
    name: norwegian-blue
'''
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
    """Entry point for the elasticache_subnet_group Ansible module.

    Creates, modifies, or deletes an ElastiCache subnet group depending
    on the requested ``state``. Always exits through
    ``module.exit_json``/``module.fail_json``.

    Fix: the original used the Python-2-only ``except Exc, e`` syntax,
    which is a SyntaxError on Python 3; ``except Exc as e`` is valid on
    Python 2.6+ and 3.x.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        description=dict(required=False),
        subnets=dict(required=False, type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or {}

    # Cross-parameter validation: description/subnets are mandatory for
    # 'present' and forbidden for 'absent'.
    if state == 'present':
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(msg=str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

    # Get an elasticache connection.
    try:
        endpoint = "elasticache.%s.amazonaws.com" % region
        connect_region = RegionInfo(name=region, endpoint=endpoint)
        conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=e.message)

    try:
        changed = False
        exists = False

        try:
            matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError as e:
            # A missing group is expected when creating; anything else is fatal.
            if e.error_code != 'CacheSubnetGroupNotFoundFault':
                module.fail_json(msg=e.error_message)

        if state == 'absent':
            if exists:
                conn.delete_cache_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
                changed = True
            else:
                changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
                changed = True
    except BotoServerError as e:
        # Modifying with identical values is reported as an error by the
        # API but is a no-op from Ansible's point of view.
        if e.error_message != 'No modifications were requested.':
            module.fail_json(msg=e.error_message)
        else:
            changed = False

    module.exit_json(changed=changed)
# import module snippets
# NOTE: wildcard imports at the bottom of the file are the historical
# Ansible module convention -- module_utils supplies AnsibleModule,
# ec2_argument_spec, get_aws_connection_info, etc. at runtime.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
mikf/gallery-dl | scripts/man.py | 1 | 7598 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019-2020 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Generate man pages"""
import re
import datetime
import util
import gallery_dl.option
import gallery_dl.version
# Generate the gallery-dl(1) man page from the argparse option definitions.
# `path` defaults to data/man/gallery-dl.1 inside the repository.
def build_gallery_dl_1(path=None):
OPTS_FMT = """.TP\n.B "{}" {}\n{}"""
TEMPLATE = r"""
.TH "GALLERY-DL" "1" "%(date)s" "%(version)s" "gallery-dl Manual"
.\" disable hyphenation
.nh
.SH NAME
gallery-dl \- download image-galleries and -collections
.SH SYNOPSIS
.B gallery-dl
[OPTION]... URL...
.SH DESCRIPTION
.B gallery-dl
is a command-line program to download image-galleries and -collections
from several image hosting sites. It is a cross-platform tool
with many configuration options and powerful filenaming capabilities.
.SH OPTIONS
%(options)s
.SH EXAMPLES
.TP
gallery-dl \f[I]URL\f[]
Download images from \f[I]URL\f[].
.TP
gallery-dl -g -u <username> -p <password> \f[I]URL\f[]
Print direct URLs from a site that requires authentication.
.TP
gallery-dl --filter 'type == "ugoira"' --range '2-4' \f[I]URL\f[]
Apply filter and range expressions. This will only download
the second, third, and fourth file where its type value is equal to "ugoira".
.TP
gallery-dl r:\f[I]URL\f[]
Scan \f[I]URL\f[] for other URLs and invoke \f[B]gallery-dl\f[] on them.
.TP
gallery-dl oauth:\f[I]SITE\-NAME\f[]
Gain OAuth authentication tokens for
.IR deviantart ,
.IR flickr ,
.IR reddit ,
.IR smugmug ", and"
.IR tumblr .
.SH FILES
.TP
.I /etc/gallery-dl.conf
The system wide configuration file.
.TP
.I ~/.config/gallery-dl/config.json
Per user configuration file.
.TP
.I ~/.gallery-dl.conf
Alternate per user configuration file.
.SH BUGS
https://github.com/mikf/gallery-dl/issues
.SH AUTHORS
Mike Fährmann <mike_faehrmann@web.de>
.br
and https://github.com/mikf/gallery-dl/graphs/contributors
.SH "SEE ALSO"
.BR gallery-dl.conf (5)
"""
# Build one .TP entry per visible command-line option; options whose
# help text starts with "==" are treated as hidden and skipped.
options = []
for action in gallery_dl.option.build_parser()._actions:
if action.help.startswith("=="):
continue
options.append(OPTS_FMT.format(
", ".join(action.option_strings).replace("-", r"\-"),
r"\f[I]{}\f[]".format(action.metavar) if action.metavar else "",
action.help,
))
if not path:
path = util.path("data/man/gallery-dl.1")
with open(path, "w", encoding="utf-8") as file:
file.write(TEMPLATE.lstrip() % {
"options": "\n".join(options),
"version": gallery_dl.version.__version__,
"date"   : datetime.datetime.now().strftime("%Y-%m-%d"),
})
# Generate the gallery-dl.conf(5) man page by converting
# docs/configuration.rst into troff. `path` defaults to
# data/man/gallery-dl.conf.5 inside the repository.
def build_gallery_dl_conf_5(path=None):
TEMPLATE = r"""
.TH "GALLERY-DL.CONF" "5" "%(date)s" "%(version)s" "gallery-dl Manual"
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.SH NAME
gallery-dl.conf \- gallery-dl configuration file
.SH DESCRIPTION
gallery-dl will search for configuration files in the following places
every time it is started, unless
.B --ignore-config
is specified:
.PP
.RS 4
.nf
.I /etc/gallery-dl.conf
.I $HOME/.config/gallery-dl/config.json
.I $HOME/.gallery-dl.conf
.fi
.RE
.PP
It is also possible to specify additional configuration files with the
.B -c/--config
command-line option or to add further option values with
.B -o/--option
as <key>=<value> pairs,
Configuration files are JSON-based and therefore don't allow any ordinary
comments, but, since unused keys are simply ignored, it is possible to utilize
those as makeshift comments by settings their values to arbitrary strings.
.SH EXAMPLE
{
.RS 4
"base-directory": "/tmp/",
.br
"extractor": {
.RS 4
"pixiv": {
.RS 4
"directory": ["Pixiv", "Works", "{user[id]}"],
.br
"filename": "{id}{num}.{extension}",
.br
"username": "foo",
.br
"password": "bar"
.RE
},
.br
"flickr": {
.RS 4
"_comment": "OAuth keys for account 'foobar'",
.br
"access-token": "0123456789-0123456789abcdef",
.br
"access-token-secret": "fedcba9876543210"
.RE
}
.RE
},
.br
"downloader": {
.RS 4
"retries": 3,
.br
"timeout": 2.5
.RE
}
.RE
}
%(options)s
.SH BUGS
https://github.com/mikf/gallery-dl/issues
.SH AUTHORS
Mike Fährmann <mike_faehrmann@web.de>
.br
and https://github.com/mikf/gallery-dl/graphs/contributors
.SH "SEE ALSO"
.BR gallery-dl (1)
"""
# Convert the parsed RST sections into .SH/.SS/.IP troff structure;
# "Type" and "Default" fields are rendered inline, everything else as
# an indented paragraph. "Example" values skip the italic markup.
sections = parse_docs_configuration()
content = []
for sec_name, section in sections.items():
content.append(".SH " + sec_name.upper())
for opt_name, option in section.items():
content.append(".SS " + opt_name)
for field, text in option.items():
if field in ("Type", "Default"):
content.append('.IP "{}:" {}'.format(field, len(field)+2))
content.append(strip_rst(text))
else:
content.append('.IP "{}:" 4'.format(field))
content.append(strip_rst(text, field != "Example"))
if not path:
path = util.path("data/man/gallery-dl.conf.5")
with open(path, "w", encoding="utf-8") as file:
file.write(TEMPLATE.lstrip() % {
"options": "\n".join(content),
"version": gallery_dl.version.__version__,
"date"   : datetime.datetime.now().strftime("%Y-%m-%d"),
})
# Parse docs/configuration.rst into {section: {option: {field: text}}}.
# Relies on the RST conventions used in that file: "====" underlines mark
# sections, "----" underlines mark option names, fields start at column 0
# with an alphabetic character, and a blank line after a blank line ends
# an option block.
def parse_docs_configuration():
doc_path = util.path("docs", "configuration.rst")
with open(doc_path, encoding="utf-8") as file:
doc_lines = file.readlines()
sections = {}
sec_name = None
options = None
opt_name = None
opt_desc = None
name = None
last = None
for line in doc_lines:
# skip RST directives
if line[0] == ".":
continue
# start of new section
elif re.match(r"^=+$", line):
if sec_name and options:
sections[sec_name] = options
# The underline names the PREVIOUS line, which is the title.
sec_name = last.strip()
options = {}
# start of new option block
elif re.match(r"^-+$", line):
opt_name = last.strip()
opt_desc = {}
# end of option block
elif opt_name and opt_desc and line == "\n" and not last:
options[opt_name] = opt_desc
opt_name = None
name = None
# inside option block
elif opt_name:
if line[0].isalpha():
# unindented line = new field name ("Type", "Default", ...)
name = line.strip()
opt_desc[name] = ""
else:
line = line.strip()
if line.startswith(("* ", "- ")):
# list item
line = ".br\n" + line
elif line.startswith("| "):
# line block
line = line[2:] + "\n.br"
opt_desc[name] += line + "\n"
last = line
# Flush the final section (no trailing "====" underline follows it).
sections[sec_name] = options
return sections
def strip_rst(text, extended=True, *, ITALIC=r"\\f[I]\1\\f[]", REGULAR=r"\1"):
    """Convert inline reStructuredText markup to troff text.

    Backslashes are escaped first, then each markup form is rewritten in
    order. With ``extended`` False, ``double-backtick`` literals keep
    their plain text instead of being italicized.
    """
    converted = text.replace("\\", "\\\\")
    rules = (
        (r"``([^`]+)``", ITALIC if extended else REGULAR),  # ``literal``
        (r"\|([^|]+)\|_*", ITALIC),                         # |substitution|_
        (r"`([^`<]+) <[^>`]+>`_+", ITALIC),                 # `text <url>`__
        (r"`([^`]+)`_+", ITALIC),                           # `reference`_
        (r"`([^`]+)`", REGULAR),                            # `role text`
        (r"([A-Za-z0-9-]+)_+(?=\s)", ITALIC),               # bare target_
        (r"---+", ""),                                      # horizontal rules
    )
    for pattern, replacement in rules:
        converted = re.sub(pattern, replacement, converted)
    return converted
if __name__ == "__main__":
# Regenerate both man pages in place under data/man/.
build_gallery_dl_1()
build_gallery_dl_conf_5()
| gpl-2.0 |
scripni/rethinkdb | packaging/osx/biplist/__init__.py | 37 | 31532 | """biplist -- a library for reading and writing binary property list files.
Binary Property List (plist) files provide a faster and smaller serialization
format for property lists on OS X. This is a library for generating binary
plists which can be read by OS X, iOS, or other clients.
The API models the plistlib API, and will call through to plistlib when
XML serialization or deserialization is required.
To generate plists with UID values, wrap the values with the Uid object. The
value must be an int.
To generate plists with NSData/CFData values, wrap the values with the
Data object. The value must be a string.
Date values can only be datetime.datetime objects.
The exceptions InvalidPlistException and NotBinaryPlistException may be
thrown to indicate that the data cannot be serialized or deserialized as
a binary plist.
Plist generation example:
from biplist import *
from datetime import datetime
plist = {'aKey':'aValue',
'0':1.322,
'now':datetime.now(),
'list':[1,2,3],
'tuple':('a','b','c')
}
try:
writePlist(plist, "example.plist")
    except (InvalidPlistException, NotBinaryPlistException) as e:
        print("Something bad happened:", e)
Plist parsing example:
from biplist import *
try:
plist = readPlist("example.plist")
print plist
    except (InvalidPlistException, NotBinaryPlistException) as e:
        print("Not a plist:", e)
"""
import sys
from collections import namedtuple
import datetime
import io
import math
import plistlib
from struct import pack, unpack
from struct import error as struct_error
import sys
import time
try:
unicode
unicodeEmpty = r''
except NameError:
unicode = str
unicodeEmpty = ''
try:
long
except NameError:
long = int
try:
{}.iteritems
iteritems = lambda x: x.iteritems()
except AttributeError:
iteritems = lambda x: x.items()
__all__ = [
'Uid', 'Data', 'readPlist', 'writePlist', 'readPlistFromString',
'writePlistToString', 'InvalidPlistException', 'NotBinaryPlistException'
]
# Apple uses Jan 1, 2001 as a base for all plist date/times.
apple_reference_date = datetime.datetime.utcfromtimestamp(978307200)
class Uid(int):
    """Integer subclass marking keyed-archiving UID values.

    Values wrapped in Uid are serialized as plist UID objects rather
    than plain integers.
    """
    def __repr__(self):
        return "Uid({0})".format(int(self))
class Data(bytes):
"""Wrapper around bytes to distinguish Data values."""
# Marker subclass: lets callers (and the writer) tell NSData/CFData
# payloads apart from ordinary byte strings.
class InvalidPlistException(Exception):
"""Raised when the plist is incorrectly formatted."""
class NotBinaryPlistException(Exception):
"""Raised when a binary plist was expected but not encountered."""
# Read a plist from a path or an open binary file object. Binary plists
# are parsed by PlistReader; anything else falls back to plistlib (XML),
# with its Data values normalized to biplist.Data afterwards.
# Raises NotBinaryPlistException, InvalidPlistException.
def readPlist(pathOrFile):
"""Raises NotBinaryPlistException, InvalidPlistException"""
didOpen = False
result = None
# Accept both filesystem paths and already-open file-like objects.
if isinstance(pathOrFile, (bytes, unicode)):
pathOrFile = open(pathOrFile, 'rb')
didOpen = True
try:
reader = PlistReader(pathOrFile)
result = reader.parse()
except NotBinaryPlistException as e:
# Not a binary plist: retry from the start with plistlib.
try:
pathOrFile.seek(0)
result = None
if hasattr(plistlib, 'loads'):
# Python 3.4+ API; re-read the raw bytes for plistlib.loads.
contents = None
if isinstance(pathOrFile, (bytes, unicode)):
with open(pathOrFile, 'rb') as f:
contents = f.read()
else:
contents = pathOrFile.read()
result = plistlib.loads(contents)
else:
result = plistlib.readPlist(pathOrFile)
result = wrapDataObject(result, for_binary=True)
except Exception as e:
# Any plistlib failure means the input is not a valid plist at all.
raise InvalidPlistException(e)
finally:
# Only close handles this function opened itself.
if didOpen:
pathOrFile.close()
return result
# Recursively convert data wrappers in an object tree so it suits either
# plistlib (for_binary=False) or this module's binary writer
# (for_binary=True). Lists and dicts are modified in place; tuples are
# rebuilt.
def wrapDataObject(o, for_binary=False):
if isinstance(o, Data) and not for_binary:
v = sys.version_info
# NOTE(review): wrapping is skipped on 3.4+ -- presumably because
# plistlib there accepts bytes directly; confirm against plistlib docs.
if not (v[0] >= 3 and v[1] >= 4):
o = plistlib.Data(o)
elif isinstance(o, (bytes, plistlib.Data)) and for_binary:
# plistlib.Data exposes the payload via .data; plain bytes pass through.
if hasattr(o, 'data'):
o = Data(o.data)
elif isinstance(o, tuple):
o = wrapDataObject(list(o), for_binary)
o = tuple(o)
elif isinstance(o, list):
for i in range(len(o)):
o[i] = wrapDataObject(o[i], for_binary)
elif isinstance(o, dict):
for k in o:
o[k] = wrapDataObject(o[k], for_binary)
return o
# Write rootObject to a path or an open binary file object. With
# binary=False the tree is normalized for plistlib and written as XML;
# otherwise PlistWriter emits the binary format.
def writePlist(rootObject, pathOrFile, binary=True):
if not binary:
rootObject = wrapDataObject(rootObject, binary)
if hasattr(plistlib, "dump"):
# Python 3.4+ plistlib API.
if isinstance(pathOrFile, (bytes, unicode)):
with open(pathOrFile, 'wb') as f:
return plistlib.dump(rootObject, f)
else:
return plistlib.dump(rootObject, pathOrFile)
else:
return plistlib.writePlist(rootObject, pathOrFile)
else:
didOpen = False
if isinstance(pathOrFile, (bytes, unicode)):
pathOrFile = open(pathOrFile, 'wb')
didOpen = True
writer = PlistWriter(pathOrFile)
result = writer.writeRoot(rootObject)
# Only close handles this function opened itself.
if didOpen:
pathOrFile.close()
return result
# Parse a plist from an in-memory bytes value (binary or XML).
def readPlistFromString(data):
return readPlist(io.BytesIO(data))
# Serialize rootObject to bytes; XML via whichever plistlib API exists
# on this Python version, or the binary format via PlistWriter.
def writePlistToString(rootObject, binary=True):
if not binary:
rootObject = wrapDataObject(rootObject, binary)
if hasattr(plistlib, "dumps"):
# Python 3.4+.
return plistlib.dumps(rootObject)
elif hasattr(plistlib, "writePlistToBytes"):
# Python 3.0-3.3.
return plistlib.writePlistToBytes(rootObject)
else:
# Python 2.
return plistlib.writePlistToString(rootObject)
else:
ioObject = io.BytesIO()
writer = PlistWriter(ioObject)
writer.writeRoot(rootObject)
return ioObject.getvalue()
def is_stream_binary_plist(stream):
    """Return True when *stream* begins with the binary-plist magic.

    Seeks to the start of the stream first; on return the position is
    just past the 7 bytes that were read.
    """
    stream.seek(0)
    return stream.read(7) == b'bplist0'
# PlistTrailer mirrors the 32-byte trailer at the end of a binary plist
# (field order matches the "!xxxxxxBBQQQ" unpack below); PlistByteCounts
# tallies bytes per value type -- presumably used by the writer for
# sizing (writer not visible in this file).
PlistTrailer = namedtuple('PlistTrailer', 'offsetSize, objectRefSize, offsetCount, topLevelObjectNumber, offsetTableOffset')
PlistByteCounts = namedtuple('PlistByteCounts', 'nullBytes, boolBytes, intBytes, realBytes, dateBytes, dataBytes, stringBytes, uidBytes, arrayBytes, setBytes, dictBytes')
class PlistReader(object):
file = None
contents = ''
offsets = None
trailer = None
currentOffset = 0
def __init__(self, fileOrStream):
"""Raises NotBinaryPlistException."""
self.reset()
self.file = fileOrStream
def parse(self):
return self.readRoot()
def reset(self):
self.trailer = None
self.contents = ''
self.offsets = []
self.currentOffset = 0
def readRoot(self):
result = None
self.reset()
# Get the header, make sure it's a valid file.
if not is_stream_binary_plist(self.file):
raise NotBinaryPlistException()
self.file.seek(0)
self.contents = self.file.read()
if len(self.contents) < 32:
raise InvalidPlistException("File is too short.")
trailerContents = self.contents[-32:]
try:
self.trailer = PlistTrailer._make(unpack("!xxxxxxBBQQQ", trailerContents))
offset_size = self.trailer.offsetSize * self.trailer.offsetCount
offset = self.trailer.offsetTableOffset
offset_contents = self.contents[offset:offset+offset_size]
offset_i = 0
while offset_i < self.trailer.offsetCount:
begin = self.trailer.offsetSize*offset_i
tmp_contents = offset_contents[begin:begin+self.trailer.offsetSize]
tmp_sized = self.getSizedInteger(tmp_contents, self.trailer.offsetSize)
self.offsets.append(tmp_sized)
offset_i += 1
self.setCurrentOffsetToObjectNumber(self.trailer.topLevelObjectNumber)
result = self.readObject()
except TypeError as e:
raise InvalidPlistException(e)
return result
def setCurrentOffsetToObjectNumber(self, objectNumber):
self.currentOffset = self.offsets[objectNumber]
def readObject(self):
result = None
tmp_byte = self.contents[self.currentOffset:self.currentOffset+1]
marker_byte = unpack("!B", tmp_byte)[0]
format = (marker_byte >> 4) & 0x0f
extra = marker_byte & 0x0f
self.currentOffset += 1
def proc_extra(extra):
if extra == 0b1111:
#self.currentOffset += 1
extra = self.readObject()
return extra
# bool, null, or fill byte
if format == 0b0000:
if extra == 0b0000:
result = None
elif extra == 0b1000:
result = False
elif extra == 0b1001:
result = True
elif extra == 0b1111:
pass # fill byte
else:
raise InvalidPlistException("Invalid object found at offset: %d" % (self.currentOffset - 1))
# int
elif format == 0b0001:
extra = proc_extra(extra)
result = self.readInteger(pow(2, extra))
# real
elif format == 0b0010:
extra = proc_extra(extra)
result = self.readReal(extra)
# date
elif format == 0b0011 and extra == 0b0011:
result = self.readDate()
# data
elif format == 0b0100:
extra = proc_extra(extra)
result = self.readData(extra)
# ascii string
elif format == 0b0101:
extra = proc_extra(extra)
result = self.readAsciiString(extra)
# Unicode string
elif format == 0b0110:
extra = proc_extra(extra)
result = self.readUnicode(extra)
# uid
elif format == 0b1000:
result = self.readUid(extra)
# array
elif format == 0b1010:
extra = proc_extra(extra)
result = self.readArray(extra)
# set
elif format == 0b1100:
extra = proc_extra(extra)
result = set(self.readArray(extra))
# dict
elif format == 0b1101:
extra = proc_extra(extra)
result = self.readDict(extra)
else:
raise InvalidPlistException("Invalid object found: {format: %s, extra: %s}" % (bin(format), bin(extra)))
return result
def readInteger(self, byteSize):
result = 0
original_offset = self.currentOffset
data = self.contents[self.currentOffset:self.currentOffset + byteSize]
result = self.getSizedInteger(data, byteSize, as_number=True)
self.currentOffset = original_offset + byteSize
return result
def readReal(self, length):
result = 0.0
to_read = pow(2, length)
data = self.contents[self.currentOffset:self.currentOffset+to_read]
if length == 2: # 4 bytes
result = unpack('>f', data)[0]
elif length == 3: # 8 bytes
result = unpack('>d', data)[0]
else:
raise InvalidPlistException("Unknown real of length %d bytes" % to_read)
return result
def readRefs(self, count):
refs = []
i = 0
while i < count:
fragment = self.contents[self.currentOffset:self.currentOffset+self.trailer.objectRefSize]
ref = self.getSizedInteger(fragment, len(fragment))
refs.append(ref)
self.currentOffset += self.trailer.objectRefSize
i += 1
return refs
def readArray(self, count):
result = []
values = self.readRefs(count)
i = 0
while i < len(values):
self.setCurrentOffsetToObjectNumber(values[i])
value = self.readObject()
result.append(value)
i += 1
return result
def readDict(self, count):
result = {}
keys = self.readRefs(count)
values = self.readRefs(count)
i = 0
while i < len(keys):
self.setCurrentOffsetToObjectNumber(keys[i])
key = self.readObject()
self.setCurrentOffsetToObjectNumber(values[i])
value = self.readObject()
result[key] = value
i += 1
return result
def readAsciiString(self, length):
result = unpack("!%ds" % length, self.contents[self.currentOffset:self.currentOffset+length])[0]
self.currentOffset += length
return result
def readUnicode(self, length):
actual_length = length*2
data = self.contents[self.currentOffset:self.currentOffset+actual_length]
# unpack not needed?!! data = unpack(">%ds" % (actual_length), data)[0]
self.currentOffset += actual_length
return data.decode('utf_16_be')
def readDate(self):
result = unpack(">d", self.contents[self.currentOffset:self.currentOffset+8])[0]
# Use timedelta to workaround time_t size limitation on 32-bit python.
result = datetime.timedelta(seconds=result) + apple_reference_date
self.currentOffset += 8
return result
def readData(self, length):
result = self.contents[self.currentOffset:self.currentOffset+length]
self.currentOffset += length
return Data(result)
def readUid(self, length):
return Uid(self.readInteger(length+1))
def getSizedInteger(self, data, byteSize, as_number=False):
    """Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
    # 1, 2 and 4 byte integers are always unsigned.
    if byteSize == 1:
        return unpack('>B', data)[0]
    if byteSize == 2:
        return unpack('>H', data)[0]
    if byteSize == 4:
        return unpack('>L', data)[0]
    if byteSize == 8:
        # 8-byte values are signed only when they represent numbers.
        fmt = '>q' if as_number else '>Q'
        return unpack(fmt, data)[0]
    if byteSize <= 16:
        # Odd sizes, or integers larger than 8 bytes. Capped at 16 bytes
        # to avoid looping forever over garbage input.
        if hasattr(int, 'from_bytes'):  # Python 3 fast path
            return int.from_bytes(data, 'big')
        # Python 2 fallback: accumulate one byte at a time.
        value = 0
        for byte in data:
            value = (value << 8) | unpack('>B', byte)[0]
        return value
    raise InvalidPlistException("Encountered integer longer than 16 bytes.")
class HashableWrapper(object):
    """Identity-hashable wrapper for unhashable containers (lists, dicts,
    sets) so they can participate in the uniquing sets/dicts."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<HashableWrapper: {0}>".format([self.value])
class BoolWrapper(object):
    """Wrapper that keeps plist booleans distinct from the integers 0/1
    during uniquing and serialization."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<BoolWrapper: {0}>".format(self.value)
class FloatWrapper(object):
    """Interning wrapper for floats: FloatWrapper(x) always returns the one
    shared instance for a given value, so equal floats unique correctly."""
    _instances = {}

    def __new__(cls, value):
        # Reuse the cached wrapper when one already exists for this value.
        try:
            return cls._instances[value]
        except KeyError:
            instance = object.__new__(cls)
            instance.value = value
            cls._instances[value] = instance
            return instance

    def __repr__(self):
        return "<FloatWrapper: {0}>".format(self.value)
class StringWrapper(object):
    '''Interning wrapper for strings destined for a binary plist.

    Ensures we only have one instance for any encoded string, and that
    strings are encoded 1-byte-per-character (ASCII) when possible,
    falling back to UTF-16-BE otherwise.
    '''
    __instances = {}
    # Encoded byte string and the codec name used to produce it;
    # filled in on the shared instance by __new__.
    encodedValue = None
    encoding = None

    def __new__(cls, value):
        '''Ensure we only have a only one instance for any string,
        and that we encode ascii as 1-byte-per character when possible'''
        encodedValue = None
        for encoding in ('ascii', 'utf_16_be'):
            try:
                encodedValue = value.encode(encoding)
            except UnicodeError:
                # Not representable in this encoding; try the next one.
                # (UnicodeError also covers the implicit-decode failures
                # that byte strings produce on Python 2.)
                pass
            if encodedValue is not None:
                if encodedValue not in cls.__instances:
                    cls.__instances[encodedValue] = super(StringWrapper, cls).__new__(cls)
                    cls.__instances[encodedValue].encodedValue = encodedValue
                    cls.__instances[encodedValue].encoding = encoding
                return cls.__instances[encodedValue]
        raise ValueError('Unable to get ascii or utf_16_be encoding for %s' % repr(value))

    def __len__(self):
        '''Return roughly the number of characters in this string (half the byte length)'''
        if self.encoding == 'ascii':
            return len(self.encodedValue)
        else:
            return len(self.encodedValue)//2

    @property
    def encodingMarker(self):
        # Binary-plist marker nibble: 0b0101 = ASCII string, 0b0110 = UTF-16.
        if self.encoding == 'ascii':
            return 0b0101
        else:
            return 0b0110

    def __repr__(self):
        return '<StringWrapper (%s): %s>' % (self.encoding, self.encodedValue)
class PlistWriter(object):
    """Serializes a Python object tree into the binary plist format.

    Usage: ``PlistWriter(fileobj).writeRoot(obj)``. Objects are first
    wrapped (see the wrapper classes above) so that everything is
    hashable, a sizing pass computes byte counts and reference widths,
    then a second pass emits objects, the offset table and the trailer.
    """
    # NOTE(review): the bytes after 'bplist00' are an application tag that
    # this writer has always emitted; kept byte-for-byte for compatibility.
    header = b'bplist00bybiplist1.0'
    file = None
    byteCounts = None
    trailer = None
    computedUniques = None
    writtenReferences = None
    referencePositions = None
    wrappedTrue = None
    wrappedFalse = None

    def __init__(self, file):
        """@param file a writable binary file-like object."""
        self.reset()
        self.file = file
        # Canonical singletons so True/False each unique to one reference.
        self.wrappedTrue = BoolWrapper(True)
        self.wrappedFalse = BoolWrapper(False)

    def reset(self):
        """Clear all bookkeeping so the writer can be reused."""
        self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        self.trailer = PlistTrailer(0, 0, 0, 0, 0)
        # A set of all the uniques which have been computed.
        self.computedUniques = set()
        # A list of all the uniques which have been written.
        self.writtenReferences = {}
        # A dict of the positions of the written uniques.
        self.referencePositions = {}

    def positionOfObjectReference(self, obj):
        """If the given object has been written already, return its
        position in the offset table. Otherwise, return None."""
        return self.writtenReferences.get(obj)

    def writeRoot(self, root):
        """
        Strategy is:
        - write header
        - wrap root object so everything is hashable
        - compute size of objects which will be written
          - need to do this in order to know how large the object refs
            will be in the list/dict/set reference lists
        - write objects
          - keep objects in writtenReferences
          - keep positions of object references in referencePositions
          - write object references with the length computed previously
        - compute object reference length
        - write object reference positions
        - write trailer
        """
        output = self.header
        wrapped_root = self.wrapRoot(root)
        # The root is always written as a reference (object number 0).
        should_reference_root = True
        self.computeOffsets(wrapped_root, asReference=should_reference_root, isRoot=True)
        self.trailer = self.trailer._replace(**{'objectRefSize': self.intSize(len(self.computedUniques))})
        # Registers the root in writtenReferences (as reference 0); the byte
        # output of this call is deliberately discarded.
        self.writeObjectReference(wrapped_root, output)
        output = self.writeObject(wrapped_root, output, setReferencePosition=True)
        # output size at this point is an upper bound on how big the
        # object reference offsets need to be.
        self.trailer = self.trailer._replace(**{
            'offsetSize': self.intSize(len(output)),
            'offsetCount': len(self.computedUniques),
            'offsetTableOffset': len(output),
            'topLevelObjectNumber': 0
            })
        output = self.writeOffsetTable(output)
        output += pack('!xxxxxxBBQQQ', *self.trailer)
        self.file.write(output)

    def wrapRoot(self, root):
        """Recursively wrap a plain Python object tree in the hashable
        wrapper types so uniquing via sets/dicts works."""
        if isinstance(root, bool):
            if root is True:
                return self.wrappedTrue
            else:
                return self.wrappedFalse
        elif isinstance(root, float):
            return FloatWrapper(root)
        elif isinstance(root, set):
            n = set()
            for value in root:
                n.add(self.wrapRoot(value))
            return HashableWrapper(n)
        elif isinstance(root, dict):
            n = {}
            for key, value in iteritems(root):
                n[self.wrapRoot(key)] = self.wrapRoot(value)
            return HashableWrapper(n)
        elif isinstance(root, list):
            n = []
            for value in root:
                n.append(self.wrapRoot(value))
            return HashableWrapper(n)
        elif isinstance(root, tuple):
            n = tuple([self.wrapRoot(value) for value in root])
            return HashableWrapper(n)
        elif isinstance(root, (str, unicode)) and not isinstance(root, Data):
            # Data may subclass str (Python 2), hence the explicit exclusion.
            return StringWrapper(root)
        else:
            return root

    def incrementByteCount(self, field, incr=1):
        """Add `incr` to one counter field of the byteCounts namedtuple."""
        self.byteCounts = self.byteCounts._replace(**{field: self.byteCounts.__getattribute__(field) + incr})

    def computeOffsets(self, obj, asReference=False, isRoot=False):
        """Sizing pass: tally the bytes each category of object will occupy
        and collect the set of unique referenced objects."""
        def check_key(key):
            # Plist dictionary keys must be non-null strings.
            if key is None:
                raise InvalidPlistException('Dictionary keys cannot be null in plists.')
            elif isinstance(key, Data):
                raise InvalidPlistException('Data cannot be dictionary keys in plists.')
            elif not isinstance(key, StringWrapper):
                raise InvalidPlistException('Keys must be strings.')

        def proc_size(size):
            # Lengths above 0b1110 need an extra encoded integer after the marker.
            if size > 0b1110:
                size += self.intSize(size)
            return size
        # If this should be a reference, then we keep a record of it in the
        # uniques table.
        if asReference:
            if obj in self.computedUniques:
                return
            else:
                self.computedUniques.add(obj)
        if obj is None:
            self.incrementByteCount('nullBytes')
        elif isinstance(obj, BoolWrapper):
            self.incrementByteCount('boolBytes')
        elif isinstance(obj, Uid):
            # Uid before int: Uid is expected to pass the int check too.
            size = self.intSize(obj)
            self.incrementByteCount('uidBytes', incr=1+size)
        elif isinstance(obj, (int, long)):
            size = self.intSize(obj)
            self.incrementByteCount('intBytes', incr=1+size)
        elif isinstance(obj, FloatWrapper):
            size = self.realSize(obj)
            self.incrementByteCount('realBytes', incr=1+size)
        elif isinstance(obj, datetime.datetime):
            self.incrementByteCount('dateBytes', incr=2)
        elif isinstance(obj, Data):
            size = proc_size(len(obj))
            self.incrementByteCount('dataBytes', incr=1+size)
        elif isinstance(obj, StringWrapper):
            size = proc_size(len(obj))
            self.incrementByteCount('stringBytes', incr=1+size)
        elif isinstance(obj, HashableWrapper):
            obj = obj.value
            if isinstance(obj, set):
                size = proc_size(len(obj))
                self.incrementByteCount('setBytes', incr=1+size)
                for value in obj:
                    self.computeOffsets(value, asReference=True)
            elif isinstance(obj, (list, tuple)):
                size = proc_size(len(obj))
                self.incrementByteCount('arrayBytes', incr=1+size)
                for value in obj:
                    self.computeOffsets(value, asReference=True)
            elif isinstance(obj, dict):
                size = proc_size(len(obj))
                self.incrementByteCount('dictBytes', incr=1+size)
                for key, value in iteritems(obj):
                    check_key(key)
                    self.computeOffsets(key, asReference=True)
                    self.computeOffsets(value, asReference=True)
        else:
            raise InvalidPlistException("Unknown object type.")

    def writeObjectReference(self, obj, output):
        """Tries to write an object reference, adding it to the references
        table. Does not write the actual object bytes or set the reference
        position. Returns a tuple of whether the object was a new reference
        (True if it was, False if it already was in the reference table)
        and the new output.
        """
        position = self.positionOfObjectReference(obj)
        if position is None:
            self.writtenReferences[obj] = len(self.writtenReferences)
            output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
            return (True, output)
        else:
            output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
            return (False, output)

    def writeObject(self, obj, output, setReferencePosition=False):
        """Serializes the given object to the output. Returns output.
        If setReferencePosition is True, will set the position the
        object was written.
        """
        def proc_variable_length(format, length):
            result = b''
            if length > 0b1110:
                # Low nibble 0b1111 means "length follows as an int object".
                result += pack('!B', (format << 4) | 0b1111)
                result = self.writeObject(length, result)
            else:
                result += pack('!B', (format << 4) | length)
            return result
        if isinstance(obj, (str, unicode)) and obj == unicodeEmpty:
            # The Apple Plist decoder can't decode a zero length Unicode string.
            obj = b''
        if setReferencePosition:
            self.referencePositions[obj] = len(output)
        if obj is None:
            output += pack('!B', 0b00000000)
        elif isinstance(obj, BoolWrapper):
            if obj.value is False:
                output += pack('!B', 0b00001000)
            else:
                output += pack('!B', 0b00001001)
        elif isinstance(obj, Uid):
            size = self.intSize(obj)
            output += pack('!B', (0b1000 << 4) | size - 1)
            output += self.binaryInt(obj)
        elif isinstance(obj, (int, long)):
            byteSize = self.intSize(obj)
            # The marker's low nibble encodes log2 of the byte size.
            root = math.log(byteSize, 2)
            output += pack('!B', (0b0001 << 4) | int(root))
            output += self.binaryInt(obj, as_number=True)
        elif isinstance(obj, FloatWrapper):
            # just use doubles
            output += pack('!B', (0b0010 << 4) | 3)
            output += self.binaryReal(obj)
        elif isinstance(obj, datetime.datetime):
            timestamp = (obj - apple_reference_date).total_seconds()
            output += pack('!B', 0b00110011)
            output += pack('!d', float(timestamp))
        elif isinstance(obj, Data):
            output += proc_variable_length(0b0100, len(obj))
            output += obj
        elif isinstance(obj, StringWrapper):
            output += proc_variable_length(obj.encodingMarker, len(obj))
            output += obj.encodedValue
        elif isinstance(obj, bytes):
            output += proc_variable_length(0b0101, len(obj))
            output += obj
        elif isinstance(obj, HashableWrapper):
            obj = obj.value
            if isinstance(obj, (set, list, tuple)):
                if isinstance(obj, set):
                    output += proc_variable_length(0b1100, len(obj))
                else:
                    output += proc_variable_length(0b1010, len(obj))
                # Write all references first, then the bodies of the ones
                # that were new, so reference order matches object order.
                objectsToWrite = []
                for objRef in obj:
                    (isNew, output) = self.writeObjectReference(objRef, output)
                    if isNew:
                        objectsToWrite.append(objRef)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
            elif isinstance(obj, dict):
                output += proc_variable_length(0b1101, len(obj))
                keys = []
                values = []
                objectsToWrite = []
                # Snapshot keys/values in one iteration so both lists align.
                for key, value in iteritems(obj):
                    keys.append(key)
                    values.append(value)
                for key in keys:
                    (isNew, output) = self.writeObjectReference(key, output)
                    if isNew:
                        objectsToWrite.append(key)
                for value in values:
                    (isNew, output) = self.writeObjectReference(value, output)
                    if isNew:
                        objectsToWrite.append(value)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
        return output

    def writeOffsetTable(self, output):
        """Writes all of the object reference offsets."""
        writtenReferences = list(self.writtenReferences.items())
        writtenReferences.sort(key=lambda x: x[1])
        for obj, order in writtenReferences:
            # Porting note: Elsewhere we deliberately replace empty unicode
            # strings with empty binary strings, but the empty unicode
            # string goes into writtenReferences. This isn't an issue in
            # Py2 because u'' and b'' have the same hash; but it is in
            # Py3, where they don't.
            if bytes != str and obj == unicodeEmpty:
                obj = b''
            position = self.referencePositions.get(obj)
            if position is None:
                raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
            output += self.binaryInt(position, self.trailer.offsetSize)
        return output

    def binaryReal(self, obj):
        """Encode a FloatWrapper as a big-endian double."""
        # just use doubles
        result = pack('>d', obj.value)
        return result

    def binaryInt(self, obj, byteSize=None, as_number=False):
        """Encode an integer big-endian in `byteSize` bytes (computed from
        the value when not given). 8-byte encodings are signed only when
        as_number is True; 16-byte encodings are zero-padded unsigned."""
        result = b''
        if byteSize is None:
            byteSize = self.intSize(obj)
        if byteSize == 1:
            result += pack('>B', obj)
        elif byteSize == 2:
            result += pack('>H', obj)
        elif byteSize == 4:
            result += pack('>L', obj)
        elif byteSize == 8:
            if as_number:
                result += pack('>q', obj)
            else:
                result += pack('>Q', obj)
        elif byteSize <= 16:
            try:
                result = pack('>Q', 0) + pack('>Q', obj)
            except struct_error as e:
                raise InvalidPlistException("Unable to pack integer %d: %s" % (obj, e))
        else:
            raise InvalidPlistException("Core Foundation can't handle integers with size greater than 16 bytes.")
        return result

    def intSize(self, obj):
        """Returns the number of bytes necessary to store the given integer."""
        # SIGNED
        if obj < 0: # Signed integer, always 8 bytes
            return 8
        # UNSIGNED
        elif obj <= 0xFF: # 1 byte
            return 1
        elif obj <= 0xFFFF: # 2 bytes
            return 2
        elif obj <= 0xFFFFFFFF: # 4 bytes
            return 4
        # SIGNED
        # 0x7FFFFFFFFFFFFFFF is the max.
        elif obj <= 0x7FFFFFFFFFFFFFFF: # 8 bytes signed
            return 8
        elif obj <= 0xffffffffffffffff: # 8 bytes unsigned
            return 16
        else:
            raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")

    def realSize(self, obj):
        """Reals are always written as 8-byte doubles."""
        return 8
| agpl-3.0 |
gregvonkuster/icqsol | bem/icqBaseLaplaceSolver.py | 2 | 2956 | #!/usr/bin/env python
from __future__ import print_function
import vtk
import numpy
from icqsol.bem.icqBaseSolver import BaseSolver
from icqsol.bem.icqPotentialIntegrals import PotentialIntegrals
from icqsol.bem.icqQuadrature import gaussPtsAndWeights
from icqsol.util.icqSharedLibraryUtils import getSharedLibraryName
from ctypes import cdll, POINTER, byref, c_void_p, c_double, c_long
FOUR_PI = 4. * numpy.pi
class BaseLaplaceSolver(BaseSolver):
    # Boundary element method (BEM) solver base for the Laplace equation:
    # builds the Green-function ("response") matrix over surface triangles.

    def __init__(self, pdata, max_edge_length, order=5):
        """
        Constructor
        @param pdata instance of vtkPolyData
        @param max_edge_length maximum edge length, used to turn
                               polygons into triangles
        @param order Gauss quadrature order used for the diagonal integrals
        """
        BaseSolver.__init__(self, pdata, max_edge_length, order)
        # Load the compiled C++ helper that fills in the off-diagonal terms.
        libName = getSharedLibraryName('icqLaplaceMatricesCpp')
        self.lib = cdll.LoadLibrary(libName)
        # Green function matrix, one row/column per surface triangle.
        # NOTE(review): assumes BaseSolver.__init__ sets self.numTriangles,
        # self.ptIdList, self.points and self.pdata -- confirm in icqBaseSolver.
        shp = (self.numTriangles, self.numTriangles)
        self.gMat = numpy.zeros(shp, numpy.float64)
        self.__computeResponseMatrix()

    def __computeDiagonalTerms(self):
        # Fill the diagonal (self-interaction) entries of gMat by Gauss
        # quadrature of the singular 1/r kernel over each source triangle.
        # Gauss points and weights; gpws has shape (3, npts) with rows
        # xsi, eta and weight (see the unpacking below).
        gpws = gaussPtsAndWeights[self.order]
        npts = gpws.shape[1]
        xsis, etas, weights = gpws[0, :], gpws[1, :], gpws[2, :]
        # iterate over the source triangles
        for jSrc in range(self.numTriangles):
            ia, ib, ic = self.ptIdList[jSrc]
            # The triangle vertex positions
            paSrc = numpy.array(self.points.GetPoint(ia))
            pbSrc = numpy.array(self.points.GetPoint(ib))
            pcSrc = numpy.array(self.points.GetPoint(ic))
            # Edge vectors from vertex a; used as the parametric basis.
            dbSrc = pbSrc - paSrc
            dcSrc = pcSrc - paSrc
            # Iterate over the observer points
            g = 0
            for ipt in range(npts):
                # Observer point, in the triangle's (xsi, eta) coordinates.
                xObs = paSrc + xsis[ipt]*dbSrc + etas[ipt]*dcSrc
                # Three triangles having observer point as one corner;
                # splitting this way isolates the 1/r singularity at xObs.
                # NOTE(review): PotentialIntegrals presumably evaluates the
                # integral of 1/r over a corner triangle -- confirm there.
                pot0ab = PotentialIntegrals(xObs, paSrc, pbSrc, self.order)
                pot0bc = PotentialIntegrals(xObs, pbSrc, pcSrc, self.order)
                pot0ca = PotentialIntegrals(xObs, pcSrc, paSrc, self.order)
                g += weights[ipt] * (pot0ab.getIntegralOneOverR() + \
                                     pot0bc.getIntegralOneOverR() + \
                                     pot0ca.getIntegralOneOverR())
            # -1/(4*pi) prefactor of the free-space Laplace Green function.
            self.gMat[jSrc, jSrc] = g / (-FOUR_PI)

    def __computeOffDiagonalTerms(self):
        # Delegate the regular (non-singular) off-diagonal entries to the
        # compiled routine, which writes directly into gMat's buffer.
        # NOTE(review): GetAddressAsString appears to return a prefixed hex
        # address; the first 5 characters are stripped before parsing with
        # base auto-detection -- confirm against the VTK docs.
        addr = int(self.pdata.GetAddressAsString('vtkPolyData')[5:], 0)
        self.lib.computeOffDiagonalTerms(c_long(addr),
                                         self.gMat.ctypes.data_as(POINTER(c_double)))

    def __computeResponseMatrix(self):
        # Diagonal terms first (pure Python), then off-diagonal (C++).
        self.__computeDiagonalTerms()
        self.__computeOffDiagonalTerms()

    def getGreenMatrix(self):
        """
        Return the Green function matrix
        @return matrix
        """
        return self.gMat
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.