repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
axbaretto/beam | sdks/python/apache_beam/examples/snippets/transforms/elementwise/map_test.py | 5 | 2481 | # coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import map
def check_plants(actual):
expected = '''[START plants]
🍓Strawberry
🥕Carrot
🍆Eggplant
🍅Tomato
🥔Potato
[END plants]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_plant_details(actual):
expected = '''[START plant_details]
{'icon': '🍓', 'name': 'Strawberry', 'duration': 'perennial'}
{'icon': '🥕', 'name': 'Carrot', 'duration': 'biennial'}
{'icon': '🍆', 'name': 'Eggplant', 'duration': 'perennial'}
{'icon': '🍅', 'name': 'Tomato', 'duration': 'annual'}
{'icon': '🥔', 'name': 'Potato', 'duration': 'perennial'}
[END plant_details]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
'apache_beam.examples.snippets.transforms.elementwise.map.print', str)
class MapTest(unittest.TestCase):
def test_map_simple(self):
map.map_simple(check_plants)
def test_map_function(self):
map.map_function(check_plants)
def test_map_lambda(self):
map.map_lambda(check_plants)
def test_map_multiple_arguments(self):
map.map_multiple_arguments(check_plants)
def test_map_tuple(self):
map.map_tuple(check_plants)
def test_map_side_inputs_singleton(self):
map.map_side_inputs_singleton(check_plants)
def test_map_side_inputs_iter(self):
map.map_side_inputs_iter(check_plants)
def test_map_side_inputs_dict(self):
map.map_side_inputs_dict(check_plant_details)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
vaquer/FastReadingPy | FastReading/settings.py | 1 | 2293 | """
Django settings for FastReading project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b!6=c08m5a_p^k)*kegw$=lx=54imkzp&5c!1md46i5tq)!(0%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
UPLOAD_FILES = os.path.join(BASE_DIR, 'UP')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_DIRS = (
'{0}/FastReading/templates'.format(BASE_DIR),
'{0}/FastReading/static/js'.format(BASE_DIR),
)
ROOT_URLCONF = 'FastReading.urls'
WSGI_APPLICATION = 'FastReading.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
'{0}/FastReading/static/'.format(BASE_DIR),
)
| gpl-2.0 |
xinjiguaike/edx-platform | cms/envs/test.py | 15 | 9245 | # -*- coding: utf-8 -*-
"""
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .common import *
import os
from path import Path as path
from warnings import filterwarnings, simplefilter
from uuid import uuid4
# import settings from LMS for consistent behavior with CMS
# pylint: disable=unused-import
from lms.envs.test import (
WIKI_ENABLED,
PLATFORM_NAME,
SITE_NAME,
DEFAULT_FILE_STORAGE,
MEDIA_ROOT,
MEDIA_URL,
# This is practically unused but needed by the oauth2_provider package, which
# some tests in common/ rely on.
OAUTH_OIDC_ISSUER,
)
# mongo connection settings
MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost')
THIS_UUID = uuid4().hex[:5]
# Nose Test Runner
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
_SYSTEM = 'cms'
_REPORT_DIR = REPO_ROOT / 'reports' / _SYSTEM
_REPORT_DIR.makedirs_p()
_NOSEID_DIR = REPO_ROOT / '.testids' / _SYSTEM
_NOSEID_DIR.makedirs_p()
NOSE_ARGS = [
'--id-file', _NOSEID_DIR / 'noseids',
'--xunit-file', _REPORT_DIR / 'nosetests.xml',
]
TEST_ROOT = path('test_root')
# Want static files in the same dir for running on jenkins.
STATIC_ROOT = TEST_ROOT / "staticfiles"
GITHUB_REPO_ROOT = TEST_ROOT / "data"
DATA_DIR = TEST_ROOT / "data"
COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data"
# For testing "push to lms"
FEATURES['ENABLE_EXPORT_GIT'] = True
GIT_REPO_EXPORT_DIR = TEST_ROOT / "export_course_repos"
# Makes the tests run much faster...
SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead
# TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
STATICFILES_DIRS += [
(course_dir, COMMON_TEST_DATA_ROOT / course_dir)
for course_dir in os.listdir(COMMON_TEST_DATA_ROOT)
if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir)
]
# Avoid having to run collectstatic before the unit test suite
# If we don't add these settings, then Django templates that can't
# find pipelined assets will raise a ValueError.
# http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline
STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage'
STATIC_URL = "/static/"
PIPELINE_ENABLED = False
TENDER_DOMAIN = "help.edge.edx.org"
TENDER_SUBDOMAIN = "edxedge"
# Update module store settings per defaults for tests
update_module_store_settings(
MODULESTORE,
module_store_options={
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': TEST_ROOT / "data",
},
doc_store_settings={
'db': 'test_xmodule',
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'collection': 'test_modulestore{0}'.format(THIS_UUID),
},
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'db': 'test_xcontent',
'port': MONGO_PORT_NUM,
'collection': 'dont_trip',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "cms.db",
},
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "preview"
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'mongo_metadata_inheritance'),
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Add external_auth to Installed apps for testing
INSTALLED_APPS += ('external_auth', )
# Add milestones to Installed apps for testing
INSTALLED_APPS += ('milestones', 'openedx.core.djangoapps.call_stack_manager')
# hide ratelimit warnings while running tests
filterwarnings('ignore', message='No request passed to the backend, unable to rate-limit')
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
# https://docs.python.org/2/library/warnings.html#the-warnings-filter
# Change to "default" to see the first instance of each hit
# or "error" to convert all into errors
simplefilter('ignore')
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
########################### Server Ports ###################################
# These ports are carefully chosen so that if the browser needs to
# access them, they will be available through the SauceLabs SSH tunnel
LETTUCE_SERVER_PORT = 8003
XQUEUE_PORT = 8040
YOUTUBE_PORT = 8031
LTI_PORT = 8765
VIDEO_SOURCE_PORT = 8777
################### Make tests faster
# http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# dummy segment-io key
SEGMENT_IO_KEY = '***REMOVED***'
FEATURES['ENABLE_SERVICE_STATUS'] = True
# Toggles embargo on for testing
FEATURES['EMBARGO'] = True
# set up some testing for microsites
MICROSITE_CONFIGURATION = {
"test_microsite": {
"domain_prefix": "testmicrosite",
"university": "test_microsite",
"platform_name": "Test Microsite",
"logo_image_url": "test_microsite/images/header-logo.png",
"email_from_address": "test_microsite@edx.org",
"payment_support_email": "test_microsite@edx.org",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "test_microsite.localhost",
"course_org_filter": "TestMicrositeX",
"course_about_show_social_links": False,
"css_overrides_file": "test_microsite/css/test_microsite.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "This is a Test Microsite Overlay Text.",
"course_index_overlay_logo_file": "test_microsite/images/header-logo.png",
"homepage_overlay_html": "<h1>This is a Test Microsite Overlay HTML</h1>"
},
"default": {
"university": "default_university",
"domain_prefix": "www",
}
}
MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_microsites'
FEATURES['USE_MICROSITES'] = True
# For consistency in user-experience, keep the value of this setting in sync with
# the one in lms/envs/test.py
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Enable a parental consent age limit for testing
PARENTAL_CONSENT_AGE_LIMIT = 13
# Enable content libraries code for the tests
FEATURES['ENABLE_CONTENT_LIBRARIES'] = True
FEATURES['ENABLE_EDXNOTES'] = True
# MILESTONES
FEATURES['MILESTONES_APP'] = True
# ENTRANCE EXAMS
FEATURES['ENTRANCE_EXAMS'] = True
ENTRANCE_EXAM_MIN_SCORE_PCT = 50
VIDEO_CDN_URL = {
'CN': 'http://api.xuetangx.com/edx/video?s3_url='
}
# Courseware Search Index
FEATURES['ENABLE_COURSEWARE_INDEX'] = True
FEATURES['ENABLE_LIBRARY_INDEX'] = True
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# teams feature
FEATURES['ENABLE_TEAMS'] = True
# Dummy secret key for dev/test
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| agpl-3.0 |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/encodings/gb2312.py | 816 | 1027 | #
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb2312',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
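# Illustrative usage sketch: once the encodings package registers this codec,
# it is reachable through the ordinary str/bytes API. Only the registered
# codec name 'gb2312' is assumed here; nothing below is part of the module.
#
#     "abc".encode("gb2312")        # the ASCII range maps to single bytes
#     b"\xc4\xe3".decode("gb2312")  # one GB2312 double-byte code point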
| lgpl-3.0 |
resmo/ansible | lib/ansible/modules/network/cloudengine/ce_reboot.py | 12 | 4188 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_reboot
version_added: 2.4
short_description: Reboot a HUAWEI CloudEngine switch.
description:
- Reboot a HUAWEI CloudEngine switch.
author: Gong Jianjun (@QijunPan)
requirements: ["ncclient"]
options:
confirm:
description:
- Safeguard boolean. Set to true if you're sure you want to reboot.
type: bool
required: true
save_config:
description:
- Flag indicating whether to save the configuration.
required: false
type: bool
default: false
'''
EXAMPLES = '''
- name: reboot module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Reboot the device
ce_reboot:
confirm: true
save_config: true
provider: "{{ cli }}"
'''
RETURN = '''
rebooted:
description: Whether the device was instructed to reboot.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import execute_nc_action, ce_argument_spec
try:
from ncclient.operations.errors import TimeoutExpiredError
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
CE_NC_XML_EXECUTE_REBOOT = """
<action>
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<reboot>
<saveConfig>%s</saveConfig>
</reboot>
</devm>
</action>
"""
class Reboot(object):
""" Reboot a network device """
def __init__(self, **kwargs):
""" __init___ """
self.network_module = None
self.netconf = None
self.init_network_module(**kwargs)
self.confirm = self.network_module.params['confirm']
self.save_config = self.network_module.params['save_config']
def init_network_module(self, **kwargs):
""" init network module """
self.network_module = AnsibleModule(**kwargs)
def netconf_set_action(self, xml_str):
""" netconf execute action """
try:
execute_nc_action(self.network_module, xml_str)
except TimeoutExpiredError:
pass
def work(self):
""" start to work """
if not self.confirm:
self.network_module.fail_json(
msg='Error: Confirm must be set to true for this module to work.')
xml_str = CE_NC_XML_EXECUTE_REBOOT % str(self.save_config).lower()
self.netconf_set_action(xml_str)
def main():
""" main """
argument_spec = dict(
confirm=dict(required=True, type='bool'),
save_config=dict(default=False, type='bool')
)
argument_spec.update(ce_argument_spec)
module = Reboot(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_NCCLIENT:
module.network_module.fail_json(msg='Error: The ncclient library is required.')
changed = False
rebooted = False
module.work()
changed = True
rebooted = True
results = dict()
results['changed'] = changed
results['rebooted'] = rebooted
module.network_module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_weakref.py | 35 | 42138 | import gc
import sys
import unittest
import UserList
import weakref
import operator
from test import test_support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
def create_unbound_method():
return C.method
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
self.check_basic_ref(create_unbound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
self.check_basic_callback(create_unbound_method)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertTrue(ref1() is None,
"expected reference to be invalidated")
self.assertTrue(ref2() is None,
"expected reference to be invalidated")
self.assertTrue(self.cbcalled == 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(weakref.ReferenceError, check, ref1)
self.assertRaises(weakref.ReferenceError, check, ref2)
self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
self.assertTrue(self.cbcalled == 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertTrue(ref() is not None,
"weak reference to live object should be live")
o2 = ref()
self.assertTrue(o is o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertTrue(self.cbcalled == 1,
"callback did not properly set 'cbcalled'")
self.assertTrue(ref() is None,
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertTrue(ref1 is ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertTrue(ref1 is ref2,
"reference object w/out callback should be re-used")
self.assertTrue(weakref.getweakrefcount(o) == 2,
"wrong weak ref count for object")
del proxy
self.assertTrue(weakref.getweakrefcount(o) == 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertTrue(proxy1 is proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = UserList.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
with test_support.check_py3k_warnings():
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = UserList.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = UserList.UserList(range(10))
p3 = weakref.proxy(L3)
with test_support.check_py3k_warnings():
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __unicode__(self):
return u"unicode"
instance = C()
self.assertIn("__unicode__", dir(weakref.proxy(instance)))
self.assertEqual(unicode(weakref.proxy(instance)), u"unicode")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertTrue(p1 is p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertTrue(p1 is p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertTrue(p1 is p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertTrue(p1 is p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertTrue(type(ref1) is weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertTrue(o.bar == 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertTrue(o.bar == 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertTrue(proxy.foo == 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertTrue(proxy.foo == 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertTrue(not hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertTrue(o.foo == 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertTrue(
o.foo == 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertTrue(not hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertTrue(weakref.getweakrefcount(o) == 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertTrue(weakref.getweakrefcount(o) == 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertTrue(weakref.getweakrefcount(o) == 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertTrue(weakref.getweakrefcount(1) == 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertTrue(weakref.getweakrefs(o) == [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertTrue(weakref.getweakrefs(o) == [ref1],
"list of refs does not match")
del ref1
self.assertTrue(weakref.getweakrefs(o) == [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertTrue(weakref.getweakrefs(1) == [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertTrue(p + 1.0 == 3.0)
self.assertTrue(1.0 + p == 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertTrue(external_wr() is callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that both old-style classes and new-style classes
# are weakrefable.
class A(object):
pass
class B:
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
b = weakref.ref(B, l.append)
B = None
gc.collect()
self.assertEqual(b(), None)
self.assertEqual(l, [a, b])
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super(MyRef, self).__init__(ob, callback)
def __call__(self):
self.called = True
return super(MyRef, self).__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertTrue(mr() is o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertTrue(mr() is None)
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertTrue(r1 is not r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertTrue(r2 is refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertTrue(r1 is not r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
# Bug #3110
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
class MappingTestCase(TestBase):
COUNT = 10
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertTrue(weakref.getweakrefcount(o) == 1,
"wrong number of weak references to %r!" % o)
self.assertTrue(o is dict[o.arg],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
items1.sort()
items2.sort()
self.assertTrue(items1 == items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertTrue(len(dict) == self.COUNT)
del objects[0]
self.assertTrue(len(dict) == (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertTrue(len(dict) == 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertTrue(weakref.getweakrefcount(o) == 1,
"wrong number of weak references to %r!" % o)
self.assertTrue(o.arg is dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertTrue(set(items1) == set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertTrue(len(dict) == self.COUNT)
del objects[0]
self.assertTrue(len(dict) == (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertTrue(len(dict) == 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
for wr in dict.iterkeyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = dict.items()
for item in dict.iteritems():
items.remove(item)
self.assertTrue(len(items) == 0, "iteritems() did not touch all items")
# key iterator, via __iter__():
keys = dict.keys()
for k in dict:
keys.remove(k)
self.assertTrue(len(keys) == 0, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = dict.keys()
for k in dict.iterkeys():
keys.remove(k)
self.assertTrue(len(keys) == 0, "iterkeys() did not touch all keys")
# value iterator:
values = dict.values()
for v in dict.itervalues():
values.remove(v)
self.assertTrue(len(values) == 0,
"itervalues() did not touch all values")
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertTrue(dict[o] == 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertTrue(dict[o] == 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o] = o.arg
return dict, objects
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = map(Object, range(self.COUNT))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertTrue(len(weakdict) == 2)
k, v = weakdict.popitem()
self.assertTrue(len(weakdict) == 1)
if k is key1:
self.assertTrue(v is value1)
else:
self.assertTrue(v is value2)
k, v = weakdict.popitem()
self.assertTrue(len(weakdict) == 0)
if k is key1:
self.assertTrue(v is value1)
else:
self.assertTrue(v is value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertTrue(value1 is not value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict,
"mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict,
"original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertTrue(len(d) == 2)
del d[o1]
self.assertTrue(len(d) == 1)
self.assertTrue(d.keys() == [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertTrue(len(d) == 2)
del d['something']
self.assertTrue(len(d) == 1)
self.assertTrue(d.items() == [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = d.keys()
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time thru the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
test_support.run_unittest(
ReferencesTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
)
test_support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
| gpl-2.0 |
WhireCrow/openwrt-mt7620 | staging_dir/host/lib/python2.7/xmllib.py | 227 | 34865 | """A parser for XML, using the derived class as static DTD."""
# Author: Sjoerd Mullender.
import re
import string
import warnings
warnings.warn("The xmllib module is obsolete. Use xml.sax instead.",
DeprecationWarning, 2)
del warnings
version = '0.3'
class Error(RuntimeError):
pass
# Regular expressions used for parsing
_S = '[ \t\r\n]+' # white space
_opS = '[ \t\r\n]*' # optional white space
_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
interesting = re.compile('[]&<]')
amp = re.compile('&')
ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
space = re.compile(_S + '$')
newline = re.compile('\n')
attrfind = re.compile(
_S + '(?P<name>' + _Name + ')'
'(' + _opS + '=' + _opS +
'(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
starttagopen = re.compile('<' + _Name)
starttagend = re.compile(_opS + '(?P<slash>/?)>')
starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
'(?P<attrs>(?:'+attrfind.pattern+')*)'+
starttagend.pattern)
endtagopen = re.compile('</')
endbracket = re.compile(_opS + '>')
endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
tagfind = re.compile(_Name)
cdataopen = re.compile(r'<!\[CDATA\[')
cdataclose = re.compile(r'\]\]>')
# this matches one of the following:
# SYSTEM SystemLiteral
# PUBLIC PubidLiteral SystemLiteral
_SystemLiteral = '(?P<%s>'+_QStr+')'
_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
"'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
_ExternalId = '(?:SYSTEM|' \
'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
')'+_S+_SystemLiteral%'syslit'
doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
'(?:'+_S+_ExternalId+')?'+_opS)
xmldecl = re.compile('<\?xml'+_S+
'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
'(?:'+_S+'encoding'+_opS+'='+_opS+
"(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
'"[A-Za-z][-A-Za-z0-9._]*"))?'
'(?:'+_S+'standalone'+_opS+'='+_opS+
'(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
_opS+'\?>')
procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
procclose = re.compile(_opS + r'\?>')
commentopen = re.compile('<!--')
commentclose = re.compile('-->')
doubledash = re.compile('--')
attrtrans = string.maketrans(' \r\n\t', '    ')  # four spaces: maketrans needs equal-length strings
# definitions for XML namespaces
_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
ncname = re.compile(_NCName + '$')
qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
'(?P<local>' + _NCName + ')$')
xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
# XML parser base class -- find tags and call handler functions.
# Usage: p = XMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods with
# special names to handle tags: start_foo and end_foo to handle <foo>
# and </foo>, respectively. The data between tags is passed to the
# parser by calling self.handle_data() with some data as argument (the
# data may be split up in arbitrary chunks).
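# Illustrative sketch of the usage described above. The tag name "greeting"
# and the handler bodies are hypothetical; only the start_*/end_*/handle_data
# naming convention comes from this module:
#
#     class GreetingParser(XMLParser):
#         def start_greeting(self, attrs):    # handles <greeting ...>
#             print 'start', attrs
#         def end_greeting(self):             # handles </greeting>
#             print 'end'
#         def handle_data(self, data):        # text between tags
#             print 'data:', repr(data)
#
#     p = GreetingParser()
#     p.feed('<greeting who="world">hello</greeting>')
#     p.close()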
class XMLParser:
attributes = {} # default, to be overridden
elements = {} # default, to be overridden
# parsing options, settable using keyword args in __init__
__accept_unquoted_attributes = 0
__accept_missing_endtag_name = 0
__map_case = 0
__accept_utf8 = 0
__translate_attribute_references = 1
# Interface -- initialize and reset this instance
def __init__(self, **kw):
self.__fixed = 0
if 'accept_unquoted_attributes' in kw:
self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
if 'accept_missing_endtag_name' in kw:
self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
if 'map_case' in kw:
self.__map_case = kw['map_case']
if 'accept_utf8' in kw:
self.__accept_utf8 = kw['accept_utf8']
if 'translate_attribute_references' in kw:
self.__translate_attribute_references = kw['translate_attribute_references']
self.reset()
def __fixelements(self):
self.__fixed = 1
self.elements = {}
self.__fixdict(self.__dict__)
self.__fixclass(self.__class__)
def __fixclass(self, kl):
self.__fixdict(kl.__dict__)
for k in kl.__bases__:
self.__fixclass(k)
def __fixdict(self, dict):
for key in dict.keys():
if key[:6] == 'start_':
tag = key[6:]
start, end = self.elements.get(tag, (None, None))
if start is None:
self.elements[tag] = getattr(self, key), end
elif key[:4] == 'end_':
tag = key[4:]
start, end = self.elements.get(tag, (None, None))
if end is None:
self.elements[tag] = start, getattr(self, key)
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.nomoretags = 0
self.literal = 0
self.lineno = 1
self.__at_start = 1
self.__seen_doctype = None
self.__seen_starttag = 0
self.__use_namespaces = 0
self.__namespaces = {'xml':None} # xml is implicitly declared
# backward compatibility hack: if elements not overridden,
# fill it in ourselves
if self.elements is XMLParser.elements:
self.__fixelements()
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
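    # For example (a sketch, not original code), feeding the document in
    # arbitrary chunks is equivalent to feeding it in one call:
    #   p.feed('<fo'); p.feed('o>bar</f'); p.feed('oo>'); p.close()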
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
if self.__fixed:
self.__fixed = 0
# remove self.elements so that we don't leak
del self.elements
# Interface -- translate references
def translate_references(self, data, all = 1):
if not self.__translate_attribute_references:
return data
i = 0
while 1:
res = amp.search(data, i)
if res is None:
return data
s = res.start(0)
res = ref.match(data, s)
if res is None:
self.syntax_error("bogus `&'")
i = s+1
continue
i = res.end(0)
str = res.group(1)
rescan = 0
if str[0] == '#':
if str[1] == 'x':
str = chr(int(str[2:], 16))
else:
str = chr(int(str[1:]))
if data[i - 1] != ';':
self.syntax_error("`;' missing after char reference")
i = i-1
elif all:
if str in self.entitydefs:
str = self.entitydefs[str]
rescan = 1
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
else:
self.syntax_error("reference to unknown entity `&%s;'" % str)
str = '&' + str + ';'
elif data[i - 1] != ';':
self.syntax_error("bogus `&'")
i = s + 1 # just past the &
continue
# when we get here, str contains the translated text and i points
# to the end of the string that is to be replaced
data = data[:s] + str + data[i:]
if rescan:
i = s
else:
i = s + len(str)
# Interface - return a dictionary of all namespaces currently valid
def getnamespace(self):
nsdict = {}
for t, d, nst in self.stack:
nsdict.update(d)
return nsdict
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if i > 0:
self.__at_start = 0
if self.nomoretags:
data = rawdata[i:n]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = n
break
res = interesting.search(rawdata, i)
if res:
j = res.start(0)
else:
j = n
if i < j:
data = rawdata[i:j]
if self.__at_start and space.match(data) is None:
self.syntax_error('illegal data at start of file')
self.__at_start = 0
if not self.stack and space.match(data) is None:
self.syntax_error('data not in content')
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
self.__seen_starttag = 1
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if commentopen.match(rawdata, i):
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
if cdataopen.match(rawdata, i):
k = self.parse_cdata(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = xmldecl.match(rawdata, i)
if res:
if not self.__at_start:
self.syntax_error("<?xml?> declaration not at start of document")
version, encoding, standalone = res.group('version',
'encoding',
'standalone')
if version[1:-1] != '1.0':
raise Error('only XML version 1.0 supported')
if encoding: encoding = encoding[1:-1]
if standalone: standalone = standalone[1:-1]
self.handle_xml(encoding, standalone)
i = res.end(0)
continue
res = procopen.match(rawdata, i)
if res:
k = self.parse_proc(i)
if k < 0: break
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
res = doctype.match(rawdata, i)
if res:
if self.literal:
data = rawdata[i]
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
i = i+1
continue
if self.__seen_doctype:
self.syntax_error('multiple DOCTYPE elements')
if self.__seen_starttag:
self.syntax_error('DOCTYPE not at beginning of document')
k = self.parse_doctype(res)
if k < 0: break
self.__seen_doctype = res.group('name')
if self.__map_case:
self.__seen_doctype = self.__seen_doctype.lower()
self.lineno = self.lineno + rawdata[i:k].count('\n')
i = k
continue
elif rawdata[i] == '&':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
res = charref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in charref")
i = i-1
if not self.stack:
self.syntax_error('data not in content')
self.handle_charref(res.group('char')[:-1])
self.lineno = self.lineno + res.group(0).count('\n')
continue
res = entityref.match(rawdata, i)
if res is not None:
i = res.end(0)
if rawdata[i-1] != ';':
self.syntax_error("`;' missing in entityref")
i = i-1
name = res.group('name')
if self.__map_case:
name = name.lower()
if name in self.entitydefs:
self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
n = len(rawdata)
i = res.start(0)
else:
self.unknown_entityref(name)
self.lineno = self.lineno + res.group(0).count('\n')
continue
elif rawdata[i] == ']':
if self.literal:
data = rawdata[i]
self.handle_data(data)
i = i+1
continue
if n-i < 3:
break
if cdataclose.match(rawdata, i):
self.syntax_error("bogus `]]>'")
self.handle_data(rawdata[i])
i = i+1
continue
else:
raise Error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
break
# end while
if i > 0:
self.__at_start = 0
if end and i < n:
data = rawdata[i]
self.syntax_error("bogus `%s'" % data)
if not self.__accept_utf8 and illegal.search(data):
self.syntax_error('illegal character in content')
self.handle_data(data)
self.lineno = self.lineno + data.count('\n')
self.rawdata = rawdata[i+1:]
return self.goahead(end)
self.rawdata = rawdata[i:]
if end:
if not self.__seen_starttag:
self.syntax_error('no elements in file')
if self.stack:
self.syntax_error('missing end tags')
while self.stack:
self.finish_endtag(self.stack[-1][0])
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
            raise Error('unexpected call to parse_comment')
res = commentclose.search(rawdata, i+4)
if res is None:
return -1
if doubledash.search(rawdata, i+4, res.start(0)):
self.syntax_error("`--' inside comment")
if rawdata[res.start(0)-1] == '-':
self.syntax_error('comment cannot end in three dashes')
if not self.__accept_utf8 and \
illegal.search(rawdata, i+4, res.start(0)):
self.syntax_error('illegal character in comment')
self.handle_comment(rawdata[i+4: res.start(0)])
return res.end(0)
# Internal -- handle DOCTYPE tag, return length or -1 if not terminated
def parse_doctype(self, res):
rawdata = self.rawdata
n = len(rawdata)
name = res.group('name')
if self.__map_case:
name = name.lower()
pubid, syslit = res.group('pubid', 'syslit')
if pubid is not None:
pubid = pubid[1:-1] # remove quotes
pubid = ' '.join(pubid.split()) # normalize
if syslit is not None: syslit = syslit[1:-1] # remove quotes
j = k = res.end(0)
if k >= n:
return -1
if rawdata[k] == '[':
level = 0
k = k+1
dq = sq = 0
while k < n:
c = rawdata[k]
if not sq and c == '"':
dq = not dq
elif not dq and c == "'":
sq = not sq
elif sq or dq:
pass
elif level <= 0 and c == ']':
res = endbracket.match(rawdata, k+1)
if res is None:
return -1
self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
return res.end(0)
elif c == '<':
level = level + 1
elif c == '>':
level = level - 1
if level < 0:
self.syntax_error("bogus `>' in DOCTYPE")
k = k+1
res = endbracketfind.match(rawdata, k)
if res is None:
return -1
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in DOCTYPE')
self.handle_doctype(name, pubid, syslit, None)
return res.end(0)
# Internal -- handle CDATA tag, return length or -1 if not terminated
def parse_cdata(self, i):
rawdata = self.rawdata
if rawdata[i:i+9] != '<![CDATA[':
raise Error('unexpected call to parse_cdata')
res = cdataclose.search(rawdata, i+9)
if res is None:
return -1
if not self.__accept_utf8 and \
illegal.search(rawdata, i+9, res.start(0)):
self.syntax_error('illegal character in CDATA')
if not self.stack:
self.syntax_error('CDATA not in content')
self.handle_cdata(rawdata[i+9:res.start(0)])
return res.end(0)
__xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
# Internal -- handle a processing instruction tag
def parse_proc(self, i):
rawdata = self.rawdata
end = procclose.search(rawdata, i)
if end is None:
return -1
j = end.start(0)
if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
self.syntax_error('illegal character in processing instruction')
res = tagfind.match(rawdata, i+2)
if res is None:
raise Error('unexpected call to parse_proc')
k = res.end(0)
name = res.group(0)
if self.__map_case:
name = name.lower()
if name == 'xml:namespace':
self.syntax_error('old-fashioned namespace declaration')
self.__use_namespaces = -1
# namespace declaration
# this must come after the <?xml?> declaration (if any)
# and before the <!DOCTYPE> (if any).
if self.__seen_doctype or self.__seen_starttag:
self.syntax_error('xml:namespace declaration too late in document')
attrdict, namespace, k = self.parse_attributes(name, k, j)
if namespace:
self.syntax_error('namespace declaration inside namespace declaration')
for attrname in attrdict.keys():
if not attrname in self.__xml_namespace_attributes:
self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
if not 'ns' in attrdict or not 'prefix' in attrdict:
self.syntax_error('xml:namespace without required attributes')
prefix = attrdict.get('prefix')
if ncname.match(prefix) is None:
self.syntax_error('xml:namespace illegal prefix value')
return end.end(0)
if prefix in self.__namespaces:
self.syntax_error('xml:namespace prefix not unique')
self.__namespaces[prefix] = attrdict['ns']
else:
if name.lower() == 'xml':
self.syntax_error('illegal processing instruction target name')
self.handle_proc(name, rawdata[k:j])
return end.end(0)
# Internal -- parse attributes between i and j
def parse_attributes(self, tag, i, j):
rawdata = self.rawdata
attrdict = {}
namespace = {}
while i < j:
res = attrfind.match(rawdata, i)
if res is None:
break
attrname, attrvalue = res.group('name', 'value')
if self.__map_case:
attrname = attrname.lower()
i = res.end(0)
if attrvalue is None:
self.syntax_error("no value specified for attribute `%s'" % attrname)
attrvalue = attrname
elif attrvalue[:1] == "'" == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
elif not self.__accept_unquoted_attributes:
self.syntax_error("attribute `%s' value not quoted" % attrname)
res = xmlns.match(attrname)
if res is not None:
# namespace declaration
ncname = res.group('ncname')
namespace[ncname or ''] = attrvalue or None
if not self.__use_namespaces:
self.__use_namespaces = len(self.stack)+1
continue
if '<' in attrvalue:
self.syntax_error("`<' illegal in attribute value")
if attrname in attrdict:
self.syntax_error("attribute `%s' specified twice" % attrname)
attrvalue = attrvalue.translate(attrtrans)
attrdict[attrname] = self.translate_references(attrvalue)
return attrdict, namespace, i
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
# i points to start of tag
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
tag = starttagmatch.match(rawdata, i)
if tag is None or tag.end(0) != end.end(0):
self.syntax_error('garbage in starttag')
return end.end(0)
nstag = tagname = tag.group('tagname')
if self.__map_case:
nstag = tagname = nstag.lower()
if not self.__seen_starttag and self.__seen_doctype and \
tagname != self.__seen_doctype:
self.syntax_error('starttag does not match DOCTYPE')
if self.__seen_starttag and not self.stack:
self.syntax_error('multiple elements on top level')
k, j = tag.span('attrs')
attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
self.stack.append((tagname, nsdict, nstag))
if self.__use_namespaces:
res = qname.match(tagname)
else:
res = None
if res is not None:
prefix, nstag = res.group('prefix', 'local')
if prefix is None:
prefix = ''
ns = None
for t, d, nst in self.stack:
if prefix in d:
ns = d[prefix]
if ns is None and prefix != '':
ns = self.__namespaces.get(prefix)
if ns is not None:
nstag = ns + ' ' + nstag
elif prefix != '':
nstag = prefix + ':' + nstag # undo split
self.stack[-1] = tagname, nsdict, nstag
# translate namespace of attributes
attrnamemap = {} # map from new name to old name (used for error reporting)
for key in attrdict.keys():
attrnamemap[key] = key
if self.__use_namespaces:
nattrdict = {}
for key, val in attrdict.items():
okey = key
res = qname.match(key)
if res is not None:
aprefix, key = res.group('prefix', 'local')
if self.__map_case:
key = key.lower()
if aprefix is not None:
ans = None
for t, d, nst in self.stack:
if aprefix in d:
ans = d[aprefix]
if ans is None:
ans = self.__namespaces.get(aprefix)
if ans is not None:
key = ans + ' ' + key
else:
key = aprefix + ':' + key
nattrdict[key] = val
attrnamemap[key] = okey
attrdict = nattrdict
attributes = self.attributes.get(nstag)
if attributes is not None:
for key in attrdict.keys():
if not key in attributes:
self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
for key, val in attributes.items():
if val is not None and not key in attrdict:
attrdict[key] = val
method = self.elements.get(nstag, (None, None))[0]
self.finish_starttag(nstag, attrdict, method)
if tag.group('slash') == '/':
self.finish_endtag(tagname)
return tag.end(0)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
end = endbracketfind.match(rawdata, i+1)
if end is None:
return -1
res = tagfind.match(rawdata, i+2)
if res is None:
if self.literal:
self.handle_data(rawdata[i])
return i+1
if not self.__accept_missing_endtag_name:
self.syntax_error('no name specified in end tag')
tag = self.stack[-1][0]
k = i+2
else:
tag = res.group(0)
if self.__map_case:
tag = tag.lower()
if self.literal:
if not self.stack or tag != self.stack[-1][0]:
self.handle_data(rawdata[i])
return i+1
k = res.end(0)
if endbracket.match(rawdata, k) is None:
self.syntax_error('garbage in end tag')
self.finish_endtag(tag)
return end.end(0)
# Internal -- finish processing of start tag
def finish_starttag(self, tagname, attrdict, method):
if method is not None:
self.handle_starttag(tagname, method, attrdict)
else:
self.unknown_starttag(tagname, attrdict)
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
self.literal = 0
if not tag:
self.syntax_error('name-less end tag')
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
found = -1
for i in range(len(self.stack)):
if tag == self.stack[i][0]:
found = i
if found == -1:
self.syntax_error('unopened end tag')
return
while len(self.stack) > found:
if found < len(self.stack) - 1:
self.syntax_error('missing close tag for %s' % self.stack[-1][2])
nstag = self.stack[-1][2]
method = self.elements.get(nstag, (None, None))[1]
if method is not None:
self.handle_endtag(nstag, method)
else:
self.unknown_endtag(nstag)
if self.__use_namespaces == len(self.stack):
self.__use_namespaces = 0
del self.stack[-1]
# Overridable -- handle xml processing instruction
def handle_xml(self, encoding, standalone):
pass
# Overridable -- handle DOCTYPE
def handle_doctype(self, tag, pubid, syslit, data):
pass
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
if name[0] == 'x':
n = int(name[1:], 16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
# Definition of entities -- derived classes may override
    entitydefs = {'lt': '&#60;',    # must use charref
                  'gt': '>',
                  'amp': '&#38;',   # must use charref
                  'quot': '"',
                  'apos': '&#39;',
                  }
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle cdata, could be overridden
def handle_cdata(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle processing instructions, could be overridden
def handle_proc(self, name, data):
pass
# Example -- handle relatively harmless syntax errors, could be overridden
def syntax_error(self, message):
raise Error('Syntax error at line %d: %s' % (self.lineno, message))
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, name):
self.syntax_error("reference to unknown entity `&%s;'" % name)
class TestXMLParser(XMLParser):
def __init__(self, **kw):
self.testdata = ""
XMLParser.__init__(self, **kw)
def handle_xml(self, encoding, standalone):
self.flush()
print 'xml: encoding =',encoding,'standalone =',standalone
def handle_doctype(self, tag, pubid, syslit, data):
self.flush()
print 'DOCTYPE:',tag, repr(data)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_cdata(self, data):
self.flush()
print 'cdata:', repr(data)
def handle_proc(self, name, data):
self.flush()
print 'processing:',name,repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def syntax_error(self, message):
print 'error at line %d:' % self.lineno, message
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs.items():
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
XMLParser.close(self)
self.flush()
def test(args = None):
import sys, getopt
from time import time
if not args:
args = sys.argv[1:]
opts, args = getopt.getopt(args, 'st')
klass = TestXMLParser
do_time = 0
for o, a in opts:
if o == '-s':
klass = XMLParser
elif o == '-t':
do_time = 1
if args:
file = args[0]
else:
file = 'test.xml'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
t0 = time()
try:
if do_time:
x.feed(data)
x.close()
else:
for c in data:
x.feed(c)
x.close()
except Error, msg:
t1 = time()
print msg
if do_time:
print 'total time: %g' % (t1-t0)
sys.exit(1)
t1 = time()
if do_time:
print 'total time: %g' % (t1-t0)
if __name__ == '__main__':
test()
| gpl-2.0 |
excid3/python-apt | apt/cache.py | 2 | 12982 | # cache.py - apt cache abstraction
#
# Copyright (c) 2005 Canonical
#
# Author: Michael Vogt <michael.vogt@ubuntu.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import apt_pkg
from apt import Package
import apt.progress
import os
import sys
class FetchCancelledException(IOError):
" Exception that is thrown when the user cancels a fetch operation "
pass
class FetchFailedException(IOError):
" Exception that is thrown when fetching fails "
pass
class LockFailedException(IOError):
" Exception that is thrown when locking fails "
pass
class Cache(object):
""" Dictionary-like package cache
    This class has all the packages that are available in its
    dictionary.
"""
def __init__(self, progress=None, rootdir=None, memonly=False):
self._callbacks = {}
if memonly:
# force apt to build its caches in memory
apt_pkg.Config.Set("Dir::Cache::pkgcache","")
if rootdir:
apt_pkg.Config.Set("Dir", rootdir)
apt_pkg.Config.Set("Dir::State::status", rootdir + "/var/lib/dpkg/status")
self.open(progress)
def _runCallbacks(self, name):
""" internal helper to run a callback """
if self._callbacks.has_key(name):
for callback in self._callbacks[name]:
callback()
def open(self, progress):
""" Open the package cache, after that it can be used like
a dictionary
"""
self._runCallbacks("cache_pre_open")
self._cache = apt_pkg.GetCache(progress)
self._depcache = apt_pkg.GetDepCache(self._cache)
self._records = apt_pkg.GetPkgRecords(self._cache)
self._list = apt_pkg.GetPkgSourceList()
self._list.ReadMainList()
self._dict = {}
# build the packages dict
if progress != None:
progress.Op = "Building data structures"
i=last=0
size=len(self._cache.Packages)
for pkg in self._cache.Packages:
if progress != None and last+100 < i:
progress.update(i/float(size)*100)
last=i
# drop stuff with no versions (cruft)
if len(pkg.VersionList) > 0:
self._dict[pkg.Name] = Package(self._cache, self._depcache,
self._records, self._list,
self, pkg)
i += 1
if progress != None:
progress.done()
self._runCallbacks("cache_post_open")
def __getitem__(self, key):
""" look like a dictionary (get key) """
return self._dict[key]
def __iter__(self):
for pkgname in self._dict.keys():
yield self._dict[pkgname]
raise StopIteration
def has_key(self, key):
return self._dict.has_key(key)
def __contains__(self, key):
return key in self._dict
def __len__(self):
return len(self._dict)
def keys(self):
return self._dict.keys()
def getChanges(self):
""" Get the marked changes """
changes = []
for name in self._dict.keys():
p = self._dict[name]
if p.markedUpgrade or p.markedInstall or p.markedDelete or \
p.markedDowngrade or p.markedReinstall:
changes.append(p)
return changes
def upgrade(self, distUpgrade=False):
""" Upgrade the all package, DistUpgrade will also install
new dependencies
"""
self.cachePreChange()
self._depcache.Upgrade(distUpgrade)
self.cachePostChange()
@property
def reqReinstallPkgs(self):
" return the packages not downloadable packages in reqreinst state "
reqreinst = set()
for pkg in self:
if (not pkg.candidateDownloadable and
(pkg._pkg.InstState == apt_pkg.InstStateReInstReq or
pkg._pkg.InstState == apt_pkg.InstStateHoldReInstReq)):
reqreinst.add(pkg.name)
return reqreinst
def _runFetcher(self, fetcher):
# do the actual fetching
res = fetcher.Run()
# now check the result (this is the code from apt-get.cc)
failed = False
transient = False
errMsg = ""
for item in fetcher.Items:
if item.Status == item.StatDone:
continue
if item.StatIdle:
transient = True
continue
errMsg += "Failed to fetch %s %s\n" % (item.DescURI,item.ErrorText)
failed = True
        # we raise an exception if the download failed or it was cancelled
if res == fetcher.ResultCancelled:
raise FetchCancelledException, errMsg
elif failed:
raise FetchFailedException, errMsg
return res
def _fetchArchives(self, fetcher, pm):
""" fetch the needed archives """
# get lock
lockfile = apt_pkg.Config.FindDir("Dir::Cache::Archives") + "lock"
lock = apt_pkg.GetLock(lockfile)
if lock < 0:
raise LockFailedException, "Failed to lock %s" % lockfile
try:
# this may as well throw a SystemError exception
if not pm.GetArchives(fetcher, self._list, self._records):
return False
# now run the fetcher, throw exception if something fails to be
# fetched
return self._runFetcher(fetcher)
finally:
os.close(lock)
def update(self, fetchProgress=None):
" run the equivalent of apt-get update "
lockfile = apt_pkg.Config.FindDir("Dir::State::Lists") + "lock"
lock = apt_pkg.GetLock(lockfile)
if lock < 0:
raise LockFailedException, "Failed to lock %s" % lockfile
try:
if fetchProgress == None:
fetchProgress = apt.progress.FetchProgress()
return self._cache.Update(fetchProgress, self._list)
finally:
os.close(lock)
def installArchives(self, pm, installProgress):
installProgress.startUpdate()
res = installProgress.run(pm)
installProgress.finishUpdate()
return res
def commit(self, fetchProgress=None, installProgress=None):
""" Apply the marked changes to the cache """
# FIXME:
# use the new acquire/pkgmanager interface here,
# raise exceptions when a download or install fails
# and send proper error strings to the application.
        # Currently a failed download will just display "error"
# which is less than optimal!
if fetchProgress == None:
fetchProgress = apt.progress.FetchProgress()
if installProgress == None:
installProgress = apt.progress.InstallProgress()
pm = apt_pkg.GetPackageManager(self._depcache)
fetcher = apt_pkg.GetAcquire(fetchProgress)
while True:
# fetch archives first
res = self._fetchArchives(fetcher, pm)
# then install
res = self.installArchives(pm, installProgress)
if res == pm.ResultCompleted:
break
if res == pm.ResultFailed:
raise SystemError, "installArchives() failed"
            # reload the fetcher for media swapping
fetcher.Shutdown()
return (res == pm.ResultCompleted)
# cache changes
def cachePostChange(self):
" called internally if the cache has changed, emit a signal then "
self._runCallbacks("cache_post_change")
def cachePreChange(self):
""" called internally if the cache is about to change, emit
a signal then """
self._runCallbacks("cache_pre_change")
def connect(self, name, callback):
""" connect to a signal, currently only used for
cache_{post,pre}_{changed,open} """
if not self._callbacks.has_key(name):
self._callbacks[name] = []
self._callbacks[name].append(callback)
# ----------------------------- experimental interface
class Filter(object):
""" Filter base class """
def apply(self, pkg):
""" Filter function, return True if the package matchs a
filter criteria and False otherwise
"""
return True
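# Illustrative sketch (not part of the original source): a custom filter
# only needs to override apply(); e.g. keeping installed packages only,
# assuming the era's camelCase Package API (isInstalled):
#
#   class InstalledFilter(Filter):
#       def apply(self, pkg):
#           return pkg.isInstalled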
class MarkedChangesFilter(Filter):
""" Filter that returns all marked changes """
def apply(self, pkg):
if pkg.markedInstall or pkg.markedDelete or pkg.markedUpgrade:
return True
else:
return False
class FilteredCache(object):
""" A package cache that is filtered.
    Can work on an existing cache or create a new one
"""
def __init__(self, cache=None, progress=None):
if cache == None:
self.cache = Cache(progress)
else:
self.cache = cache
self.cache.connect("cache_post_change", self.filterCachePostChange)
self.cache.connect("cache_post_open", self.filterCachePostChange)
self._filtered = {}
self._filters = []
def __len__(self):
return len(self._filtered)
def __getitem__(self, key):
return self.cache._dict[key]
def keys(self):
return self._filtered.keys()
def has_key(self, key):
return self._filtered.has_key(key)
def _reapplyFilter(self):
" internal helper to refilter "
self._filtered = {}
for pkg in self.cache._dict.keys():
for f in self._filters:
if f.apply(self.cache._dict[pkg]):
self._filtered[pkg] = 1
break
def setFilter(self, filter):
" set the current active filter "
self._filters = []
self._filters.append(filter)
#self._reapplyFilter()
# force a cache-change event that will result in a refiltering
self.cache.cachePostChange()
def filterCachePostChange(self):
" called internally if the cache changes, emit a signal then "
#print "filterCachePostChange()"
self._reapplyFilter()
# def connect(self, name, callback):
# self.cache.connect(name, callback)
def __getattr__(self, key):
" we try to look exactly like a real cache "
#print "getattr: %s " % key
if self.__dict__.has_key(key):
return self.__dict__[key]
else:
return getattr(self.cache, key)
def cache_pre_changed():
print "cache pre changed"
def cache_post_changed():
print "cache post changed"
# internal test code
if __name__ == "__main__":
print "Cache self test"
apt_pkg.init()
c = Cache(apt.progress.OpTextProgress())
c.connect("cache_pre_change", cache_pre_changed)
c.connect("cache_post_change", cache_post_changed)
print c.has_key("aptitude")
p = c["aptitude"]
print p.name
print len(c)
for pkg in c.keys():
x= c[pkg].name
c.upgrade()
changes = c.getChanges()
print len(changes)
for p in changes:
#print p.name
x = p.name
# see if fetching works
for d in ["/tmp/pytest", "/tmp/pytest/partial"]:
if not os.path.exists(d):
os.mkdir(d)
apt_pkg.Config.Set("Dir::Cache::Archives","/tmp/pytest")
pm = apt_pkg.GetPackageManager(c._depcache)
fetcher = apt_pkg.GetAcquire(apt.progress.TextFetchProgress())
c._fetchArchives(fetcher, pm)
#sys.exit(1)
print "Testing filtered cache (argument is old cache)"
f = FilteredCache(c)
f.cache.connect("cache_pre_change", cache_pre_changed)
f.cache.connect("cache_post_change", cache_post_changed)
f.cache.upgrade()
f.setFilter(MarkedChangesFilter())
print len(f)
for pkg in f.keys():
#print c[pkg].name
x = f[pkg].name
print len(f)
print "Testing filtered cache (no argument)"
f = FilteredCache(progress=OpTextProgress())
f.cache.connect("cache_pre_change", cache_pre_changed)
f.cache.connect("cache_post_change", cache_post_changed)
f.cache.upgrade()
f.setFilter(MarkedChangesFilter())
print len(f)
for pkg in f.keys():
#print c[pkg].name
x = f[pkg].name
print len(f)
| gpl-2.0 |
SlimRemix/android_external_chromium_org | tools/telemetry/telemetry/core/platform/power_monitor/sysfs_power_monitor_unittest.py | 25 | 5761 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import sysfs_power_monitor
class SysfsPowerMonitorMonitorTest(unittest.TestCase):
initial_freq = {
'cpu0': '1700000 6227\n1600000 0\n1500000 0\n1400000 28\n1300000 22\n'
'1200000 14\n1100000 19\n1000000 22\n900000 14\n800000 20\n'
'700000 15\n600000 23\n500000 23\n400000 9\n300000 28\n200000 179',
'cpu1': '1700000 11491\n1600000 0\n1500000 0\n1400000 248\n1300000 1166\n'
'1200000 2082\n1100000 2943\n1000000 6560\n900000 12517\n'
'800000 8690\n700000 5105\n600000 3800\n500000 5131\n400000 5479\n'
'300000 7571\n200000 133618',
'cpu2': '1700000 1131',
'cpu3': '1700000 1131'
}
final_freq = {
'cpu0': '1700000 7159\n1600000 0\n1500000 0\n1400000 68\n1300000 134\n'
'1200000 194\n1100000 296\n1000000 716\n900000 1301\n800000 851\n'
'700000 554\n600000 343\n500000 612\n400000 691\n300000 855\n'
'200000 15525',
'cpu1': '1700000 12048\n1600000 0\n1500000 0\n1400000 280\n1300000 1267\n'
'1200000 2272\n1100000 3163\n1000000 7039\n900000 13800\n'
'800000 9599\n700000 5655\n600000 4144\n500000 5655\n400000 6005\n'
'300000 8288\n200000 149724',
'cpu2': None,
'cpu3': ''
}
expected_initial_freq = {
'cpu0': {
1700000000: 6227,
1600000000: 0,
1500000000: 0,
1400000000: 28,
1300000000: 22,
1200000000: 14,
1100000000: 19,
1000000000: 22,
900000000: 14,
800000000: 20,
700000000: 15,
600000000: 23,
500000000: 23,
400000000: 9,
300000000: 28,
200000000: 179
},
'cpu1': {
1700000000: 11491,
1600000000: 0,
1500000000: 0,
1400000000: 248,
1300000000: 1166,
1200000000: 2082,
1100000000: 2943,
1000000000: 6560,
900000000: 12517,
800000000: 8690,
700000000: 5105,
600000000: 3800,
500000000: 5131,
400000000: 5479,
300000000: 7571,
200000000: 133618
},
'cpu2': {
1700000000: 1131
},
'cpu3': {
1700000000: 1131
}
}
expected_final_freq = {
'cpu0': {
1700000000: 7159,
1600000000: 0,
1500000000: 0,
1400000000: 68,
1300000000: 134,
1200000000: 194,
1100000000: 296,
1000000000: 716,
900000000: 1301,
800000000: 851,
700000000: 554,
600000000: 343,
500000000: 612,
400000000: 691,
300000000: 855,
200000000: 15525
},
'cpu1': {
1700000000: 12048,
1600000000: 0,
1500000000: 0,
1400000000: 280,
1300000000: 1267,
1200000000: 2272,
1100000000: 3163,
1000000000: 7039,
900000000: 13800,
800000000: 9599,
700000000: 5655,
600000000: 4144,
500000000: 5655,
400000000: 6005,
300000000: 8288,
200000000: 149724
},
'cpu2': None,
'cpu3': {}
}
expected_freq_percents = {
'whole_package': {
1700000000: 3.29254111574526,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.15926805099535601,
1300000000: 0.47124116307273645,
1200000000: 0.818756100807525,
1100000000: 1.099381692400982,
1000000000: 2.5942528544384302,
900000000: 5.68661122326737,
800000000: 3.850545467654628,
700000000: 2.409691872245393,
600000000: 1.4693702487650486,
500000000: 2.4623575553879373,
400000000: 2.672038150383057,
300000000: 3.415770495015825,
200000000: 69.59817400982045
},
'cpu0': {
1700000000: 4.113700564971752,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1765536723163842,
1300000000: 0.4943502824858757,
1200000000: 0.7944915254237288,
1100000000: 1.2226341807909604,
1000000000: 3.0632062146892656,
900000000: 5.680614406779661,
800000000: 3.6679025423728815,
700000000: 2.379060734463277,
600000000: 1.4124293785310735,
500000000: 2.599752824858757,
400000000: 3.0102401129943503,
300000000: 3.650247175141243,
200000000: 67.73481638418079
},
'cpu1': {
1700000000: 2.4713816665187682,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1419824296743278,
1300000000: 0.44813204365959713,
1200000000: 0.8430206761913214,
1100000000: 0.9761292040110037,
1000000000: 2.1252994941875945,
900000000: 5.69260803975508,
800000000: 4.033188392936374,
700000000: 2.4403230100275093,
600000000: 1.526311118999024,
500000000: 2.3249622859171177,
400000000: 2.3338361877717633,
300000000: 3.1812938148904073,
200000000: 71.46153163546012
},
'cpu2': {
1700000000: 0.0,
},
'cpu3': {
1700000000: 0.0,
}
}
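  # The expected percentages above follow from the frequency-residency
  # deltas: for each cpu,
  #   percent(freq) = 100 * (final[freq] - initial[freq])
  #                   / sum(final[f] - initial[f] for all f)
  # e.g. cpu0 at 1.7 GHz: 7159 - 6227 = 932 ticks out of 22656 total
  # ticks elapsed, i.e. 100 * 932 / 22656 ~= 4.1137 percent.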
def testParseCpuFreq(self):
initial = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.initial_freq)
final = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.final_freq)
self.assertDictEqual(initial, self.expected_initial_freq)
self.assertDictEqual(final, self.expected_final_freq)
def testComputeCpuStats(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
self.expected_initial_freq, self.expected_final_freq)
for cpu in self.expected_freq_percents:
for freq in results[cpu]:
self.assertAlmostEqual(results[cpu][freq],
self.expected_freq_percents[cpu][freq])
| bsd-3-clause |
isandlaTech/cohorte-demos | led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
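# Illustrative sketch (not part of the original source): LookupDict
# resolves keys against instance attributes and returns None for
# missing keys instead of raising KeyError.
#
#   codes = LookupDict(name='status_codes')
#   codes.ok = 200
#   codes['ok']        # -> 200
#   codes['missing']   # -> None (no KeyError)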
| apache-2.0 |
rdmorganiser/rdmo | rdmo/questions/migrations/0013_permissions.py | 2 | 1665 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-28 12:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0012_meta'),
]
operations = [
migrations.AlterModelOptions(
name='catalog',
options={'ordering': ('order',), 'permissions': (('view_catalog', 'Can view Catalog'),), 'verbose_name': 'Catalog', 'verbose_name_plural': 'Catalogs'},
),
migrations.AlterModelOptions(
name='question',
options={'permissions': (('view_question', 'Can view Question'),), 'verbose_name': 'Question', 'verbose_name_plural': 'Questions'},
),
migrations.AlterModelOptions(
name='questionentity',
options={'ordering': ('subsection__section__catalog__order', 'subsection__section__order', 'subsection__order', 'order'), 'permissions': (('view_questionentity', 'Can view Question entity'),), 'verbose_name': 'Question entity', 'verbose_name_plural': 'Question entities'},
),
migrations.AlterModelOptions(
name='section',
options={'ordering': ('catalog__order', 'order'), 'permissions': (('view_section', 'Can view Section'),), 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
),
migrations.AlterModelOptions(
name='subsection',
options={'ordering': ('section__catalog__order', 'section__order', 'order'), 'permissions': (('view_subsection', 'Can view Subsection'),), 'verbose_name': 'Subsection', 'verbose_name_plural': 'Subsections'},
),
]
| apache-2.0 |
shadyueh/pyranking | env/lib/python2.7/site-packages/django/conf/__init__.py | 135 | 7622 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"ALLOWED_INCLUDE_ROOTS",
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. "
"Please fix your settings." % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if ('django.contrib.auth.middleware.AuthenticationMiddleware' in self.MIDDLEWARE_CLASSES and
'django.contrib.auth.middleware.SessionAuthenticationMiddleware' not in self.MIDDLEWARE_CLASSES):
warnings.warn(
"Session verification will become mandatory in Django 1.10. "
"Please add 'django.contrib.auth.middleware.SessionAuthenticationMiddleware' "
"to your MIDDLEWARE_CLASSES setting when you are ready to opt-in after "
"reading the upgrade considerations in the 1.8 release notes.",
RemovedInDjango110Warning
)
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
| mit |
reox/androguard | androguard/decompiler/dad/writer.py | 2 | 26467 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from struct import unpack
from androguard.core import mutf8
from androguard.decompiler.dad.util import get_type
from androguard.decompiler.dad.opcode_ins import Op
from androguard.decompiler.dad.instruction import (
Constant, ThisParam, BinaryExpression, BaseClass, InstanceExpression,
NewInstance, Variable, BinaryCompExpression)
logger = logging.getLogger('dad.writer')
class Writer:
"""
Transforms a method into Java code.
"""
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.ind = 4
self.buffer = []
self.buffer2 = []
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.skip = False
self.need_break = True
def __str__(self):
return ''.join([str(i) for i in self.buffer])
def str_ext(self):
return self.buffer2
def inc_ind(self, i=1):
self.ind += (4 * i)
def dec_ind(self, i=1):
self.ind -= (4 * i)
def space(self):
if self.skip:
self.skip = False
return ''
return ' ' * self.ind
def write_ind(self):
if self.skip:
self.skip = False
else:
self.write(self.space())
self.write_ext(('INDENTATION', self.space()))
def write(self, s, data=None):
self.buffer.append(s)
# old method, still used
# TODO: clean?
if data:
self.buffer2.append((data, s))
# at minimum, we have t as a tuple of the form:
# (TYPE_STR, MY_STR) such as ('THIS', 'this')
# where the 2nd field is the actual generated source code
# We can have more fields, for example:
# ('METHOD', 'sendToServer', 'this -> sendToServer', <androguard.decompiler.dad.instruction.ThisParam>)
def write_ext(self, t):
if not isinstance(t, tuple):
raise "Error in write_ext: %s not a tuple" % str(t)
self.buffer2.append(t)
def end_ins(self):
self.write(';\n')
self.write_ext(('END_INSTRUCTION', ';\n'))
def write_ind_visit_end(self, lhs, s, rhs=None, data=None):
self.write_ind()
lhs.visit(self)
self.write(s)
self.write_ext(('TODO_4343', s, data))
if rhs is not None:
rhs.visit(self)
self.end_ins()
    # TODO: prefer this method over write_ind_visit_end, which should
    # eventually be deprecated
def write_ind_visit_end_ext(self,
lhs,
before,
s,
after,
rhs=None,
data=None,
subsection='UNKNOWN_SUBSECTION'):
self.write_ind()
lhs.visit(self)
self.write(before + s + after)
self.write_ext(('BEFORE', before))
self.write_ext((subsection, s, data))
self.write_ext(('AFTER', after))
if rhs is not None:
rhs.visit(self)
self.end_ins()
def write_inplace_if_possible(self, lhs, rhs):
if isinstance(rhs, BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
if rhs.op in '+-' and isinstance(exp_rhs, Constant) and \
exp_rhs.get_int_value() == 1:
return self.write_ind_visit_end(lhs, rhs.op * 2, data=rhs)
return self.write_ind_visit_end(
lhs,
' %s= ' % rhs.op,
exp_rhs,
data=rhs)
return self.write_ind_visit_end(lhs, ' = ', rhs, data=rhs)
def visit_ins(self, ins):
ins.visit(self)
def write_method(self):
acc = []
access = self.method.access
self.constructor = False
for modifier in access:
if modifier == 'constructor':
self.constructor = True
continue
acc.append(modifier)
self.write('\n%s' % self.space())
self.write_ext(('NEWLINE', '\n%s' % (self.space())))
if acc:
self.write('%s ' % ' '.join(acc))
self.write_ext(('PROTOTYPE_ACCESS', '%s ' % ' '.join(acc)))
if self.constructor:
name = get_type(self.method.cls_name).split('.')[-1]
self.write(name)
self.write_ext(('NAME_METHOD_PROTOTYPE', '%s' % name, self.method))
else:
self.write('{} {}'.format(get_type(self.method.type), self.method.name))
self.write_ext(
('PROTOTYPE_TYPE', '%s' % get_type(self.method.type)))
self.write_ext(('SPACE', ' '))
self.write_ext(
('NAME_METHOD_PROTOTYPE', '%s' % self.method.name, self.method))
params = self.method.lparams
if 'static' not in access:
params = params[1:]
proto = ''
self.write_ext(('PARENTHESIS_START', '('))
if self.method.params_type:
proto = ', '.join(['{} p{}'.format(get_type(p_type), param) for p_type,
param in zip(self.method.params_type, params)])
first = True
for p_type, param in zip(self.method.params_type, params):
if not first:
self.write_ext(('COMMA', ', '))
else:
first = False
self.write_ext(('ARG_TYPE', '%s' % get_type(p_type)))
self.write_ext(('SPACE', ' '))
self.write_ext(('NAME_ARG', 'p%s' % param, p_type, self.method))
self.write_ext(('PARENTHESIS_END', ')'))
self.write('(%s)' % proto)
if self.graph is None:
self.write(';\n')
self.write_ext(('METHOD_END_NO_CONTENT', ';\n'))
return
self.write('\n%s{\n' % self.space())
self.write_ext(('METHOD_START', '\n%s{\n' % self.space()))
self.inc_ind()
self.visit_node(self.graph.entry)
self.dec_ind()
self.write('%s}\n' % self.space())
self.write_ext(('METHOD_END', '%s}\n' % self.space()))
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
var.visit_decl(self)
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
follow = loop.follow['loop']
if follow is None and not loop.looptype.is_endless:
logger.error('Loop has no follow !')
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
self.write('%swhile (' % self.space())
self.write_ext(('WHILE', '%swhile (' % self.space()))
loop.visit_cond(self)
self.write(') {\n')
self.write_ext(('WHILE_START', ') {\n'))
elif loop.looptype.is_posttest:
self.write('%sdo {\n' % self.space())
self.write_ext(('DO', '%sdo {\n' % self.space()))
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
self.write('%swhile(true) {\n' % self.space())
self.write_ext(('WHILE_TRUE', '%swhile(true) {\n' % self.space()))
self.inc_ind()
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
self.dec_ind()
if loop.looptype.is_pretest:
self.write('%s}\n' % self.space())
self.write_ext(('END_PRETEST', '%s}\n' % self.space()))
elif loop.looptype.is_posttest:
self.latch_node.pop()
self.write('%s} while(' % self.space())
self.write_ext(('WHILE_POSTTEST', '%s} while(' % self.space()))
loop.latch.visit_cond(self)
self.write(');\n')
self.write_ext(('POSTTEST_END', ');\n'))
else:
self.inc_ind()
self.visit_node(loop.latch)
self.dec_ind()
self.write('%s}\n' % self.space())
self.write_ext(('END_LOOP', '%s}\n' % self.space()))
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
follow = cond.follow['if']
if cond.false is cond.true:
self.write('%s// Both branches of the condition point to the same'
' code.\n' % self.space())
self.write_ext(
('COMMENT_ERROR_MSG',
'%s// Both branches of the condition point to the same'
' code.\n' % self.space()))
self.write('%s// if (' % self.space())
self.write_ext(('COMMENT_IF', '%s// if (' % self.space()))
cond.visit_cond(self)
self.write(') {\n')
self.write_ext(('COMMENT_COND_END', ') {\n'))
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s// }\n' % self.space(), data="COMMENT_IF_COND_END")
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
self.write('%sif (' % self.space(), data="IF_2")
cond.visit_cond(self)
self.write(') {\n', data="IF_TRUE_2")
self.inc_ind()
self.write('%sbreak;\n' % self.space(), data="BREAK")
self.dec_ind()
self.write('%s}\n' % self.space(), data="IF_END_2")
self.visit_node(cond.false)
elif follow is not None:
if cond.true in (follow, self.next_case) or \
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if cond.true: # in self.visited_nodes:
self.write('%sif (' % self.space(), data="IF")
cond.visit_cond(self)
self.write(') {\n', data="IF_TRUE")
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
is_else = not (follow in (cond.true, cond.false))
if is_else and not cond.false in self.visited_nodes:
self.write('%s} else {\n' % self.space(), data="IF_FALSE")
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.if_follow.pop()
self.write('%s}\n' % self.space(), data="IF_END")
self.visit_node(follow)
else:
self.write('%sif (' % self.space(), data="IF_3")
cond.visit_cond(self)
self.write(') {\n', data="IF_COND_3")
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s} else {\n' % self.space(), data="ELSE_3")
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.write('%s}\n' % self.space(), data="IF_END_3")
def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
if nnot:
cond1.neg()
self.write('(', data="TODO24")
cond1.visit_cond(self)
self.write(') %s (' % ['||', '&&'][aand], data="TODO25")
cond2.visit_cond(self)
self.write(')', data="TODO26")
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
self.write('%sswitch (' % self.space(), data="SWITCH")
self.visit_ins(switch_ins)
self.write(') {\n', data="SWITCH_END")
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
self.inc_ind()
for case in switch.node_to_case[node]:
self.write(
'%scase %d:\n' % (self.space(), case),
data="CASE_XX")
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT")
default = None
self.inc_ind()
self.visit_node(node)
if self.need_break:
self.write('%sbreak;\n' % self.space(), data="CASE_BREAK")
else:
self.need_break = True
self.dec_ind(2)
if default not in (None, follow):
self.inc_ind()
self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT_2")
self.inc_ind()
self.visit_node(default)
self.dec_ind(2)
self.write('%s}\n' % self.space(), data="CASE_END")
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.write('%sbreak;\n' % self.space(), data="BREAK_2")
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
self.write('%stry {\n' % self.space(), data="TRY_START")
self.inc_ind()
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
self.dec_ind()
self.write('%s}' % self.space(), data="TRY_START_END")
for catch in try_node.catch:
self.visit_node(catch)
self.write('\n', data="NEWLINE_END_TRY")
self.visit_node(self.try_follow.pop())
def visit_catch_node(self, catch_node):
self.write(' catch (', data="CATCH")
catch_node.visit_exception(self)
self.write(') {\n', data="CATCH_START")
self.inc_ind()
self.visit_node(catch_node.catch_start)
self.dec_ind()
self.write('%s}' % self.space(), data="CATCH_END")
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
def visit_decl(self, var):
if not var.declared:
var_type = var.get_type() or 'unknownType'
self.write('{}{} v{}'.format(
self.space(), get_type(var_type), var.name),
data="DECLARATION")
self.end_ins()
def visit_constant(self, cst):
if isinstance(cst, str):
return self.write(string(cst), data="CONSTANT_STRING")
self.write('%r' % cst,
data="CONSTANT_INTEGER") # INTEGER or also others?
def visit_base_class(self, cls, data=None):
self.write(cls)
self.write_ext(('NAME_BASE_CLASS', cls, data))
def visit_variable(self, var):
var_type = var.get_type() or 'unknownType'
if not var.declared:
self.write('%s ' % get_type(var_type))
self.write_ext(
('VARIABLE_TYPE', '%s' % get_type(var_type), var_type))
self.write_ext(('SPACE', ' '))
var.declared = True
self.write('v%s' % var.name)
self.write_ext(('NAME_VARIABLE', 'v%s' % var.name, var, var_type))
def visit_param(self, param, data=None):
self.write('p%s' % param)
self.write_ext(('NAME_PARAM', 'p%s' % param, data))
def visit_this(self):
self.write('this', data="THIS")
def visit_super(self):
self.write('super')
def visit_assign(self, lhs, rhs):
if lhs is not None:
return self.write_inplace_if_possible(lhs, rhs)
self.write_ind()
rhs.visit(self)
if not self.skip:
self.end_ins()
def visit_move_result(self, lhs, rhs):
self.write_ind_visit_end(lhs, ' = ', rhs)
def visit_move(self, lhs, rhs):
if lhs is not rhs:
self.write_inplace_if_possible(lhs, rhs)
def visit_astore(self, array, index, rhs, data=None):
self.write_ind()
array.visit(self)
self.write('[', data=("ASTORE_START", data))
index.visit(self)
self.write('] = ', data="ASTORE_END")
rhs.visit(self)
self.end_ins()
def visit_put_static(self, cls, name, rhs):
self.write_ind()
self.write('{}.{} = '.format(cls, name), data="STATIC_PUT")
rhs.visit(self)
self.end_ins()
def visit_put_instance(self, lhs, name, rhs, data=None):
self.write_ind_visit_end_ext(
lhs,
'.',
'%s' % name,
' = ',
rhs,
data=data,
subsection='NAME_CLASS_ASSIGNMENT')
def visit_new(self, atype, data=None):
self.write('new %s' % get_type(atype))
self.write_ext(('NEW', 'new '))
self.write_ext(
('NAME_CLASS_NEW', '%s' % get_type(atype), data.type, data))
def visit_invoke(self, name, base, ptype, rtype, args, invokeInstr):
if isinstance(base, ThisParam):
if name == '<init>':
if self.constructor and len(args) == 0:
self.skip = True
return
if invokeInstr and base.type[1:-1].replace('/', '.') != invokeInstr.cls:
base.super = True
base.visit(self)
if name != '<init>':
if isinstance(base, BaseClass):
call_name = "{} -> {}".format(base.cls, name)
elif isinstance(base, InstanceExpression):
call_name = "{} -> {}".format(base.ftype, name)
elif hasattr(base, "base") and hasattr(base, "var_map"):
base2base = base
while True:
base2base = base2base.var_map[base2base.base]
if isinstance(base2base, NewInstance):
call_name = "{} -> {}".format(base2base.type, name)
break
elif (hasattr(base2base, "base") and
hasattr(base2base, "var_map")):
continue
else:
call_name = "UNKNOWN_TODO"
break
elif isinstance(base, ThisParam):
call_name = "this -> %s" % name
elif isinstance(base, Variable):
call_name = "{} -> {}".format(base.type, name)
else:
call_name = "UNKNOWN_TODO2"
self.write('.%s' % name)
self.write_ext(('INVOKE', '.'))
self.write_ext(
('NAME_METHOD_INVOKE', '%s' % name, call_name, ptype, rtype,
base, invokeInstr))
self.write('(', data="PARAM_START")
comma = False
for arg in args:
if comma:
self.write(', ', data="PARAM_SEPARATOR")
comma = True
arg.visit(self)
self.write(')', data="PARAM_END")
def visit_return_void(self):
self.write_ind()
self.write('return', data="RETURN")
self.end_ins()
def visit_return(self, arg):
self.write_ind()
self.write('return ', data="RETURN")
arg.visit(self)
self.end_ins()
def visit_nop(self):
pass
def visit_switch(self, arg):
arg.visit(self)
def visit_check_cast(self, arg, atype):
self.write('((%s) ' % atype, data="CHECKCAST")
arg.visit(self)
self.write(')')
def visit_aload(self, array, index):
array.visit(self)
self.write('[', data="ALOAD_START")
index.visit(self)
self.write(']', data="ALOAD_END")
def visit_alength(self, array):
array.visit(self)
self.write('.length', data="ARRAY_LENGTH")
def visit_new_array(self, atype, size):
self.write('new %s[' % get_type(atype[1:]), data="NEW_ARRAY")
size.visit(self)
self.write(']', data="NEW_ARRAY_END")
def visit_filled_new_array(self, atype, size, args):
self.write('new %s {' % get_type(atype), data="NEW_ARRAY_FILLED")
for idx, arg in enumerate(args):
arg.visit(self)
if idx + 1 < len(args):
self.write(', ', data="COMMA")
self.write('})', data="NEW_ARRAY_FILLED_END")
def visit_fill_array(self, array, value):
self.write_ind()
array.visit(self)
self.write(' = {', data="ARRAY_FILLED")
data = value.get_data()
tab = []
elem_size = value.element_width
# Set type depending on size of elements
data_types = {1: 'b', 2: 'h', 4: 'i', 8: 'd'}
if elem_size in data_types:
elem_id = data_types[elem_size]
else:
# FIXME for other types we just assume bytes...
logger.warning("Unknown element size {} for array. Assume bytes.".format(elem_size))
elem_id = 'b'
elem_size = 1
for i in range(0, value.size*elem_size, elem_size):
tab.append('%s' % unpack(elem_id, data[i:i+elem_size])[0])
self.write(', '.join(tab), data="COMMA")
self.write('}', data="ARRAY_FILLED_END")
self.end_ins()
def visit_move_exception(self, var, data=None):
var.declared = True
var_type = var.get_type() or 'unknownType'
self.write('{} v{}'.format(get_type(var_type), var.name))
self.write_ext(('EXCEPTION_TYPE', '%s' % get_type(var_type), data.type))
self.write_ext(('SPACE', ' '))
self.write_ext(
('NAME_CLASS_EXCEPTION', 'v%s' % var.value(), data.type, data))
def visit_monitor_enter(self, ref):
self.write_ind()
self.write('synchronized(', data="SYNCHRONIZED")
ref.visit(self)
self.write(') {\n', data="SYNCHRONIZED_END")
self.inc_ind()
def visit_monitor_exit(self, ref):
self.dec_ind()
self.write_ind()
self.write('}\n', data="MONITOR_EXIT")
def visit_throw(self, ref):
self.write_ind()
self.write('throw ', data="THROW")
ref.visit(self)
self.end_ins()
def visit_binary_expression(self, op, arg1, arg2):
self.write('(', data="BINARY_EXPRESSION_START")
arg1.visit(self)
self.write(' %s ' % op, data="TODO58")
arg2.visit(self)
self.write(')', data="BINARY_EXPRESSION_END")
def visit_unary_expression(self, op, arg):
self.write('(%s ' % op, data="UNARY_EXPRESSION_START")
arg.visit(self)
self.write(')', data="UNARY_EXPRESSION_END")
def visit_cast(self, op, arg):
self.write('(%s ' % op, data="CAST_START")
arg.visit(self)
self.write(')', data="CAST_END")
def visit_cond_expression(self, op, arg1, arg2):
arg1.visit(self)
self.write(' %s ' % op, data="COND_EXPRESSION")
arg2.visit(self)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
return arg.visit(self)
atype = str(arg.get_type())
if atype == 'Z':
if op == Op.EQUAL:
self.write('!', data="NEGATE")
arg.visit(self)
else:
arg.visit(self)
try:
atype = atype.string
except AttributeError:
pass
if atype in 'VBSCIJFD':
self.write(' %s 0' % op, data="TODO64")
else:
self.write(' %s null' % op, data="TODO65")
def visit_get_instance(self, arg, name, data=None):
arg.visit(self)
self.write('.%s' % name)
self.write_ext(('GET_INSTANCE', '.'))
self.write_ext(('NAME_CLASS_INSTANCE', '%s' % name, data))
def visit_get_static(self, cls, name):
self.write('{}.{}'.format(cls, name), data="GET_STATIC")
def string(s):
"""
Convert a string to an escaped ASCII representation, including quotation marks
:param s: a string
:return: ASCII escaped string
"""
ret = ['"']
for c in s:
if ' ' <= c < '\x7f':
if c == "'" or c == '"' or c == '\\':
ret.append('\\')
ret.append(c)
continue
elif c <= '\x7f':
if c in ('\r', '\n', '\t'):
# unicode-escape produces bytes
ret.append(c.encode('unicode-escape').decode("ascii"))
continue
i = ord(c)
ret.append('\\u')
ret.append('%x' % (i >> 12))
ret.append('%x' % ((i >> 8) & 0x0f))
ret.append('%x' % ((i >> 4) & 0x0f))
ret.append('%x' % (i & 0x0f))
ret.append('"')
return ''.join(ret)
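# Illustrative usage sketch (added here for clarity; not part of the
# original module). `string` produces a Java-style quoted literal:
#
#   >>> string('a"b\né')
#   '"a\\"b\\n\\u00e9"'
#
# Printable ASCII passes through unchanged, quotes and backslashes gain a
# leading backslash, control characters use their escape sequence, and
# anything above 0x7f becomes a \uXXXX escape.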
| apache-2.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_variables_test.py | 80 | 6132 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
sv = stochastic_variables
st = stochastic_tensor
vi = variational_inference_impl
dist = distributions
class StochasticVariablesTest(test.TestCase):
def testStochasticVariables(self):
shape = (10, 20)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale)):
v = variable_scope.get_variable("sv", shape)
self.assertTrue(isinstance(v, st.StochasticTensor))
self.assertTrue(isinstance(v.distribution, dist.NormalWithSoftplusScale))
self.assertEqual(
{"stochastic_variables/sv_loc", "stochastic_variables/sv_scale"},
set([v.op.name for v in variables.global_variables()]))
self.assertEqual(
set(variables.trainable_variables()), set(variables.global_variables()))
v = ops.convert_to_tensor(v)
self.assertEqual(list(shape), v.get_shape().as_list())
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithConstantInitializer(self):
shape = (10, 20)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale,
dist_kwargs={"validate_args": True},
param_initializers={
"loc": np.ones(shape) * 4.,
"scale": np.ones(shape) * 2.
})):
v = variable_scope.get_variable("sv")
for var in variables.global_variables():
if "loc" in var.name:
mu_var = var
if "scale" in var.name:
sigma_var = var
v = ops.convert_to_tensor(v)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithCallableInitializer(self):
shape = (10, 20)
def sigma_init(shape, dtype, partition_info):
_ = partition_info
return array_ops.ones(shape, dtype=dtype) * 2.
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale,
dist_kwargs={"validate_args": True},
param_initializers={
"loc": np.ones(
shape, dtype=np.float32) * 4.,
"scale": sigma_init
})):
v = variable_scope.get_variable("sv", shape)
for var in variables.global_variables():
if "loc" in var.name:
mu_var = var
if "scale" in var.name:
sigma_var = var
v = ops.convert_to_tensor(v)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithPrior(self):
shape = (10, 20)
prior = dist.Normal(0., 1.)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale, prior=prior)):
w = variable_scope.get_variable("weights", shape)
x = random_ops.random_uniform((8, 10))
y = math_ops.matmul(x, w)
prior_map = vi._find_variational_and_priors(y, None)
self.assertEqual(prior_map[w], prior)
elbo = vi.elbo(y, keep_batch_dim=False)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(elbo)
def testStochasticVariablesWithCallablePriorInitializer(self):
def prior_init(shape, dtype):
return dist.Normal(
array_ops.zeros(shape, dtype), array_ops.ones(shape, dtype))
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale, prior=prior_init)):
w = variable_scope.get_variable("weights", (10, 20))
x = random_ops.random_uniform((8, 10))
y = math_ops.matmul(x, w)
prior_map = vi._find_variational_and_priors(y, None)
self.assertTrue(isinstance(prior_map[w], dist.Normal))
elbo = vi.elbo(y, keep_batch_dim=False)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(elbo)
if __name__ == "__main__":
test.main()
| apache-2.0 |
devendermishrajio/nova | nova/api/openstack/compute/schemas/evacuate.py | 83 | 1175 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
evacuate = {
'type': 'object',
'properties': {
'evacuate': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
'onSharedStorage': parameter_types.boolean,
'adminPass': parameter_types.admin_password,
},
'required': ['onSharedStorage'],
'additionalProperties': False,
},
},
'required': ['evacuate'],
'additionalProperties': False,
}
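# Illustrative sketch (added; not part of the nova sources): a request body
# that should satisfy the schema above. The jsonschema call is an assumption
# made for demonstration only.
#
#   import jsonschema
#   body = {'evacuate': {'host': 'compute-1',
#                        'onSharedStorage': True,
#                        'adminPass': 's3cret'}}
#   jsonschema.validate(body, evacuate)  # raises ValidationError on bad input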
| apache-2.0 |
tiagocoutinho/bliss | tests/test_counters.py | 1 | 2255 | # -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
import pytest
import numpy
from bliss.common.measurement import SamplingCounter, IntegratingCounter
class Diode(SamplingCounter):
def __init__(self, diode, convert_func):
SamplingCounter.__init__(self, 'test_diode', None,
grouped_read_handler = None,
conversion_function = convert_func)
self.diode = diode
def read(self, *args):
self.last_read_value = self.diode.read()
return self.last_read_value
class DiodeWithController(SamplingCounter):
def __init__(self, diode, convert_func):
SamplingCounter.__init__(self, 'test_diode', diode.controller,
grouped_read_handler = None,
conversion_function = convert_func)
self.diode = diode
class AcquisitionController:
pass
class IntegCounter(IntegratingCounter):
def __init__(self, acq_controller, convert_func):
IntegratingCounter.__init__(self, 'test_integ_diode', None, acq_controller,
grouped_read_handler = None,
conversion_function = convert_func)
def get_values(self, from_index):
return numpy.random.random(20)
def test_diode(beacon):
diode = beacon.get("diode")
def multiply_by_two(x):
return 2*x
test_diode = Diode(diode, multiply_by_two)
diode_value = test_diode.read()
assert test_diode.last_read_value*2 == diode_value
def test_diode_with_controller(beacon):
diode = beacon.get("diode")
def multiply_by_two(x):
diode.raw_value = x
return 2*x
test_diode = Diode(diode, multiply_by_two)
diode_value = test_diode.read()
assert diode.raw_value*2 == diode_value
def test_integ_counter(beacon):
acq_controller = AcquisitionController()
def multiply_by_two(x):
acq_controller.raw_value = x
return 2*x
counter = IntegCounter(acq_controller, multiply_by_two)
assert list(counter.get_values(0)) == list(2*acq_controller.raw_value)
| lgpl-3.0 |
kustodian/ansible | lib/ansible/modules/packaging/os/redhat_subscription.py | 7 | 34577 | #!/usr/bin/python
# James Laska (jlaska@redhat.com)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
- Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
config file and default to None.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
choices: [ "present", "absent" ]
default: "present"
username:
description:
- access.redhat.com or Sat6 username
password:
description:
- access.redhat.com or Sat6 password
server_hostname:
description:
- Specify an alternative Red Hat Subscription Management or Sat6 server
server_insecure:
description:
- Enable or disable https server certificate verification when connecting to C(server_hostname)
rhsm_baseurl:
description:
- Specify CDN baseurl
rhsm_repo_ca_cert:
description:
- Specify an alternative location for a CA certificate for CDN
version_added: "2.7"
server_proxy_hostname:
description:
- Specify a HTTP proxy hostname
version_added: "2.4"
server_proxy_port:
description:
- Specify a HTTP proxy port
version_added: "2.4"
server_proxy_user:
description:
- Specify a user for HTTP proxy with basic authentication
version_added: "2.4"
server_proxy_password:
description:
- Specify a password for HTTP proxy with basic authentication
version_added: "2.4"
auto_attach:
description:
- Upon successful registration, auto-consume available subscriptions
- Added in favor of deprecated autosubscribe in 2.5.
type: bool
default: 'no'
version_added: "2.5"
aliases: [autosubscribe]
activationkey:
description:
- supply an activation key for use with registration
org_id:
description:
- Organization ID to use in conjunction with activationkey
version_added: "2.0"
environment:
description:
- Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
version_added: "2.2"
pool:
description:
- |
Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
possible, as it is much faster. Mutually exclusive with I(pool_ids).
default: '^$'
pool_ids:
description:
- |
Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
default: []
version_added: "2.4"
consumer_type:
description:
- The type of unit to register, defaults to system
version_added: "2.1"
consumer_name:
description:
- Name of the system to register, defaults to the hostname
version_added: "2.1"
consumer_id:
description:
- |
References an existing consumer ID to resume using a previous registration
for this system. If the system's identity certificate is lost or corrupted,
this option allows it to resume using its previous identity and subscriptions.
The default is to not specify a consumer ID so a new ID is created.
version_added: "2.1"
force_register:
description:
- Register the system even if it is already registered
type: bool
default: 'no'
version_added: "2.2"
release:
description:
- Set a release version
version_added: "2.8"
syspurpose:
description:
- Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)
and synchronize these attributes with RHSM server. Syspurpose attributes help attach
the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file
already contains some attributes, then new attributes overwrite existing attributes.
When some attribute is not listed in the new list of attributes, the existing
attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.
type: dict
default: {}
version_added: "2.9"
suboptions:
usage:
description: Syspurpose attribute usage
role:
description: Syspurpose attribute role
service_level_agreement:
description: Syspurpose attribute service_level_agreement
addons:
description: Syspurpose attribute addons
type: list
sync:
description:
- When this option is true, then syspurpose attributes are synchronized with
RHSM server immediately. When this option is false, then syspurpose attributes
will be synchronized with RHSM server by rhsmcertd daemon.
type: bool
default: False
'''
EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
redhat_subscription:
state: present
username: joe_user
password: somepass
auto_attach: true
- name: Same as above but subscribe to a specific pool by ID.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids: 0123456789abcdef0123456789abcdef
- name: Register and subscribe to multiple pools.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef
- 1123456789abcdef0123456789abcdef
- name: Same as above but consume multiple entitlements.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef: 2
- 1123456789abcdef0123456789abcdef: 4
- name: Register and pull existing system data.
redhat_subscription:
state: present
username: joe_user
password: somepass
consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^Red Hat Enterprise Server$'
- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
redhat_subscription:
state: present
username: joe_user
password: somepass
environment: Library
auto_attach: true
- name: Register as user (joe_user) with password (somepass) and a specific release
redhat_subscription:
state: present
username: joe_user
password: somepass
release: 7.4
- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
redhat_subscription:
state: present
username: joe_user
password: somepass
auto_attach: true
syspurpose:
usage: "Production"
role: "Red Hat Enterprise Server"
service_level_agreement: "Premium"
addons:
- addon1
- addon2
sync: true
'''
RETURN = '''
subscribed_pool_ids:
description: List of pool IDs to which system is now subscribed
returned: success
type: complex
sample: {
"8a85f9815ab905d3015ab928c7005de4": "1"
}
'''
from os.path import isfile
from os import unlink
import re
import shutil
import tempfile
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import configparser
SUBMAN_CMD = None
class RegistrationBase(object):
REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
if isfile(self.REDHAT_REPO):
unlink(self.REDHAT_REPO)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if isfile(plugin_conf):
tmpfd, tmpfile = tempfile.mkstemp()
shutil.copy2(plugin_conf, tmpfile)
cfg = configparser.ConfigParser()
cfg.read([tmpfile])
if enabled:
cfg.set('main', 'enabled', '1')
else:
cfg.set('main', 'enabled', '0')
fd = open(tmpfile, 'w+')
cfg.write(fd)
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.module = module
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHSM
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--server.hostname'.
options = []
for k, v in sorted(kwargs.items()):
if re.search(r'^(server|rhsm)_', k) and v is not None:
options.append('--%s=%s' % (k.replace('_', '.', 1), v))
# When there is nothing to configure, then it is not necessary
# to run config command, because it only returns current
# content of current configuration file
if len(options) == 0:
return
args.extend(options)
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
Determine whether the current system is registered to RHSM
Returns:
* Boolean - whether the current system is currently registered to
RHSM.
'''
args = [SUBMAN_CMD, 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register, environment,
rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
server_proxy_port, server_proxy_user, server_proxy_password, release):
'''
Register the current system to the provided RHSM or Sat6 server
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'register']
# Generate command arguments
if force_register:
args.extend(['--force'])
if rhsm_baseurl:
args.extend(['--baseurl', rhsm_baseurl])
if server_insecure:
args.extend(['--insecure'])
if server_hostname:
args.extend(['--serverurl', server_hostname])
if org_id:
args.extend(['--org', org_id])
if server_proxy_hostname and server_proxy_port:
args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
if server_proxy_user:
args.extend(['--proxyuser', server_proxy_user])
if server_proxy_password:
args.extend(['--proxypassword', server_proxy_password])
if activationkey:
args.extend(['--activationkey', activationkey])
else:
if auto_attach:
args.append('--auto-attach')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
if consumer_type:
args.extend(['--type', consumer_type])
if consumer_name:
args.extend(['--name', consumer_name])
if consumer_id:
args.extend(['--consumerid', consumer_id])
if environment:
args.extend(['--environment', environment])
if release:
args.extend(['--release', release])
rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]
if items:
args = [SUBMAN_CMD, 'unsubscribe'] + items
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
return serials
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'unregister']
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', False)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression. It matches regexp against available pool ids first.
If any pool ids match, subscribe to those pools and return.
If no pool ids match, then match regexp against available pool product
names. Note this can still easily match many pools. Then subscribe
to those pools.
Since a pool id is a more specific match, we only fall back to matching
against names if we didn't match pool ids.
Raises:
* Exception - if error occurs while running command
'''
# See https://github.com/ansible/ansible/issues/19466
# subscribe to pools whose pool id matches regexp (and only the pool id)
subscribed_pool_ids = self.subscribe_pool(regexp)
# If we found any matches, we are done
# Don't attempt to match pools by product name
if subscribed_pool_ids:
return subscribed_pool_ids
# We didn't match any pool ids.
# Now try subscribing to pools based on product name match
# Note: This can match lots of product names.
subscribed_by_product_pool_ids = self.subscribe_product(regexp)
if subscribed_by_product_pool_ids:
return subscribed_by_product_pool_ids
# no matches
return []
def subscribe_by_pool_ids(self, pool_ids):
"""
Try to subscribe to the list of pool IDs
"""
available_pools = RhsmPools(self.module)
available_pool_ids = [p.get_pool_id() for p in available_pools]
for pool_id, quantity in sorted(pool_ids.items()):
if pool_id in available_pool_ids:
args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
else:
self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
return pool_ids
def subscribe_pool(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_pools(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def subscribe_product(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_products(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def update_subscriptions(self, regexp):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
serials = self.unsubscribe(serials=serials_to_remove)
subscribed_pool_ids = self.subscribe(regexp)
if subscribed_pool_ids or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
def update_subscriptions_by_pool_ids(self, pool_ids):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
existing_pools = {}
for p in consumed_pools:
existing_pools[p.get_pool_id()] = p.QuantityUsed
serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
serials = self.unsubscribe(serials=serials_to_remove)
missing_pools = {}
for pool_id, quantity in sorted(pool_ids.items()):
if existing_pools.get(pool_id, 0) != quantity:
missing_pools[pool_id] = quantity
self.subscribe_by_pool_ids(missing_pools)
if missing_pools or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
'unsubscribed_serials': serials}
def sync_syspurpose(self):
"""
Try to synchronize syspurpose attributes with server
"""
args = [SUBMAN_CMD, 'status']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def get_pool_id(self):
# getattr evaluates its default argument eagerly, so the fallback
# lookup needs its own branch to avoid raising AttributeError when
# only one of the two spellings is present.
try:
    return self.PoolId
except AttributeError:
    return self.PoolID
def subscribe(self):
args = "subscription-manager attach --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list(consumed)
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self, consumed=False):
"""
Loads list of all available or consumed pools for system in data structure
Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
# An empty line implies the end of a output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':', 1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
# else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
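# Illustrative note (added): the loop above parses "key: value" blocks of
# the form
#
#   Subscription Name: Red Hat Enterprise Linux Server
#   Pool ID:           0123456789abcdef0123456789abcdef
#   Quantity Used:     1
#
# Spaces are stripped from keys ("Pool ID" becomes "PoolID"), which is why
# RhsmPool.get_pool_id() probes both the 'PoolId' and 'PoolID' spellings.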
def filter_pools(self, regexp='^$'):
'''
Return a list of RhsmPools whose pool id matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product.get_pool_id()):
yield product
def filter_products(self, regexp='^$'):
'''
Return a list of RhsmPools whose product name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
class SysPurpose(object):
"""
This class is used for reading and writing to syspurpose.json file
"""
SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
def __init__(self, path=None):
"""
Initialize class used for reading syspurpose json file
"""
self.path = path or self.SYSPURPOSE_FILE_PATH
def update_syspurpose(self, new_syspurpose):
"""
Try to update current syspurpose with new attributes from new_syspurpose
"""
syspurpose = {}
syspurpose_changed = False
for key, value in new_syspurpose.items():
if key in self.ALLOWED_ATTRIBUTES:
if value is not None:
syspurpose[key] = value
elif key == 'sync':
pass
else:
raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
(key, self.ALLOWED_ATTRIBUTES))
current_syspurpose = self._read_syspurpose()
if current_syspurpose != syspurpose:
syspurpose_changed = True
# Update current syspurpose with new values
current_syspurpose.update(syspurpose)
# When some key is not listed in new syspurpose, then delete it from current syspurpose
# and ignore custom attributes created by user (e.g. "foo": "bar")
for key in list(current_syspurpose):
if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
del current_syspurpose[key]
self._write_syspurpose(current_syspurpose)
return syspurpose_changed
def _write_syspurpose(self, new_syspurpose):
"""
This function tries to update current new_syspurpose attributes to
json file.
"""
with open(self.path, "w") as fp:
fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
def _read_syspurpose(self):
"""
Read the current syspurpose from the json file.
"""
current_syspurpose = {}
try:
with open(self.path, "r") as fp:
content = fp.read()
except IOError:
pass
else:
current_syspurpose = json.loads(content)
return current_syspurpose
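# Illustrative sketch (added; not part of the original module): exercising
# the SysPurpose helper directly. The file path is hypothetical.
#
#   sp = SysPurpose(path='/tmp/syspurpose.json')
#   changed = sp.update_syspurpose({'role': 'Red Hat Enterprise Server',
#                                   'usage': 'Production',
#                                   'sync': True})  # 'sync' is skipped here
#   # `changed` is True when the attributes written to disk differ from
#   # the ones previously stored.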
def main():
# Load RHSM configuration from file
rhsm = Rhsm(None)
# Note: the default values for parameters are:
# 'type': 'str', 'default': None, 'required': False
# So there is no need to repeat these values for each parameter.
module = AnsibleModule(
argument_spec={
'state': {'default': 'present', 'choices': ['present', 'absent']},
'username': {},
'password': {'no_log': True},
'server_hostname': {},
'server_insecure': {},
'rhsm_baseurl': {},
'rhsm_repo_ca_cert': {},
'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
'activationkey': {'no_log': True},
'org_id': {},
'environment': {},
'pool': {'default': '^$'},
'pool_ids': {'default': [], 'type': 'list'},
'consumer_type': {},
'consumer_name': {},
'consumer_id': {},
'force_register': {'default': False, 'type': 'bool'},
'server_proxy_hostname': {},
'server_proxy_port': {},
'server_proxy_user': {},
'server_proxy_password': {'no_log': True},
'release': {},
'syspurpose': {
'type': 'dict',
'options': {
'role': {},
'usage': {},
'service_level_agreement': {},
'addons': {'type': 'list'},
'sync': {'type': 'bool', 'default': False}
}
}
},
required_together=[['username', 'password'],
['server_proxy_hostname', 'server_proxy_port'],
['server_proxy_user', 'server_proxy_password']],
mutually_exclusive=[['activationkey', 'username'],
['activationkey', 'consumer_id'],
['activationkey', 'environment'],
['activationkey', 'autosubscribe'],
['force', 'consumer_id'],
['pool', 'pool_ids']],
required_if=[['state', 'present', ['username', 'activationkey'], True]],
)
rhsm.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
auto_attach = module.params['auto_attach']
activationkey = module.params['activationkey']
org_id = module.params['org_id']
if activationkey and not org_id:
module.fail_json(msg='org_id is required when using activationkey')
environment = module.params['environment']
pool = module.params['pool']
pool_ids = {}
for value in module.params['pool_ids']:
if isinstance(value, dict):
if len(value) != 1:
module.fail_json(msg='Unable to parse pool_ids option.')
pool_id, quantity = list(value.items())[0]
else:
pool_id, quantity = value, 1
pool_ids[pool_id] = str(quantity)
consumer_type = module.params["consumer_type"]
consumer_name = module.params["consumer_name"]
consumer_id = module.params["consumer_id"]
force_register = module.params["force_register"]
server_proxy_hostname = module.params['server_proxy_hostname']
server_proxy_port = module.params['server_proxy_port']
server_proxy_user = module.params['server_proxy_user']
server_proxy_password = module.params['server_proxy_password']
release = module.params['release']
syspurpose = module.params['syspurpose']
global SUBMAN_CMD
SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
syspurpose_changed = False
if syspurpose is not None:
try:
syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
except Exception as err:
module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
# Ensure system is registered
if state == 'present':
# Register system
if rhsm.is_registered and not force_register:
if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
try:
rhsm.sync_syspurpose()
except Exception as e:
module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
if pool != '^$' or pool_ids:
try:
if pool_ids:
result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
else:
result = rhsm.update_subscriptions(pool)
except Exception as e:
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(**result)
else:
if syspurpose_changed is True:
module.exit_json(changed=True, msg="Syspurpose attributes changed.")
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhsm.enable()
rhsm.configure(**module.params)
rhsm.register(username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register,
environment, rhsm_baseurl, server_insecure, server_hostname,
server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)
if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
rhsm.sync_syspurpose()
if pool_ids:
subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
elif pool != '^$':
subscribed_pool_ids = rhsm.subscribe(pool)
else:
subscribed_pool_ids = []
except Exception as e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhsm.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhsm.unsubscribe()
rhsm.unregister()
except Exception as e:
module.fail_json(msg="Failed to unregister: %s" % to_native(e))
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
if __name__ == '__main__':
main()
| gpl-3.0 |
jnobre/lxmls-toolkit-2017 | lxmls/pos_tagging/all_train_pos_tag.py | 2 | 3145 | import sys
import codecs
from sequences.sequence import *
from sequences.sequence_list import *
import readers.pos_corpus as pcc
import readers.brown_pos_corpus as bpc
import sequences.extended_feature as exfc
import sequences.structured_perceptron as spc
import sequences.confusion_matrix as bcm
MAX_SENT_SIZE = 1000
MAX_NR_SENTENCES = 100000
MODEL_DIR = "/Users/graca/Projects/swm_src/feeds/models/all_data_postag/"
def build_corpus_features():
corpus = pcc.PostagCorpus()
train_seq = corpus.read_sequence_list_conll(
"../../data/train-02-21.conll",
max_sent_len=MAX_SENT_SIZE,
max_nr_sent=MAX_NR_SENTENCES)
corpus.add_sequence_list(train_seq)
dev_seq = corpus.read_sequence_list_conll("../../data/dev-22.conll")
corpus.add_sequence_list(dev_seq)
categories = [
'adventure',
'belles_lettres',
'editorial',
'fiction',
'government',
'hobbies',
'humor',
'learned',
'lore',
'mystery',
'news',
'religion',
'reviews',
'romance']
for cat in categories:
brown_seq = corpus.read_sequence_list_brown(categories=cat)
corpus.add_sequence_list(brown_seq)
features = exfc.ExtendedFeatures(corpus)
features.build_features()
corpus.save_corpus(MODEL_DIR)
features.save_features(MODEL_DIR+"features.txt")
return corpus, features
def train_pos(corpus, features):
model = spc.StructuredPercetron(corpus, features)
model.nr_rounds = 10
model.train_supervised(corpus.sequence_list.seq_list)
model.save_model(MODEL_DIR)
return model
def eval_model(corpus, features, model):
test_seq = corpus.read_sequence_list_conll("../../data/test-23.conll")
pred_test = model.viterbi_decode_corpus_log(test_seq.seq_list)
eval_test = model.evaluate_corpus(test_seq.seq_list, pred_test)
print "Accuracy on wsj test %f" % eval_test
def eval_brown(corpus, features, model):
categories = ['science_fiction']
for cat in categories:
brown_seq = corpus.read_sequence_list_brown(categories=cat)
brown_pred = model.viterbi_decode_corpus_log(brown_seq.seq_list)
brown_eval = model.evaluate_corpus(brown_seq.seq_list, brown_pred)
print "Accuracy on Brown cat %s: %f" % (cat, brown_eval)
def load_model():
corpus = pcc.PostagCorpus()
corpus.load_corpus(MODEL_DIR)
features = exfc.ExtendedFeatures(corpus)
features.load_features(MODEL_DIR+"features.txt", corpus)
model = spc.StructuredPercetron(corpus, features)
model.load_model(MODEL_DIR)
return corpus, features, model
def main():
print "Building corpus"
corpus, features = build_corpus_features()
print "Training model"
model = train_pos(corpus, features)
print "Testing on wsj"
eval_model(corpus, features, model)
print "Testing on brown"
eval_brown(corpus, features, model)
# print "Loading models"
# corpus,features,model = load_model()
# print "Testing on wsj"
# eval_model(corpus,features,model)
# print "Testing on brown"
# eval_brown(corpus,features,model)
| mit |
henaras/sahara | sahara/plugins/vanilla/utils.py | 10 | 1562 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins import utils as u
def get_namenode(cluster):
return u.get_instance(cluster, "namenode")
def get_jobtracker(cluster):
instance = u.get_instance(cluster, "jobtracker")
return instance
def get_resourcemanager(cluster):
return u.get_instance(cluster, 'resourcemanager')
def get_nodemanagers(cluster):
return u.get_instances(cluster, 'nodemanager')
def get_oozie(cluster):
return u.get_instance(cluster, "oozie")
def get_hiveserver(cluster):
return u.get_instance(cluster, "hiveserver")
def get_datanodes(cluster):
return u.get_instances(cluster, 'datanode')
def get_tasktrackers(cluster):
return u.get_instances(cluster, 'tasktracker')
def get_secondarynamenodes(cluster):
return u.get_instances(cluster, 'secondarynamenode')
def get_historyserver(cluster):
return u.get_instance(cluster, 'historyserver')
def get_instance_hostname(instance):
return instance.hostname() if instance else None
| apache-2.0 |
sephii/django-cms | cms/signals/plugins.py | 11 | 2026 | # -*- coding: utf-8 -*-
from cms.constants import PUBLISHER_STATE_DIRTY
from cms.models import CMSPlugin, Title, Page, StaticPlaceholder, Placeholder
def get_placeholder(plugin):
if plugin.placeholder_id:
try:
return plugin.placeholder
except Placeholder.DoesNotExist:
return None
else:
return plugin.placeholder
def set_dirty(plugin, delete_cache=True):
placeholder = get_placeholder(plugin)
if placeholder:
language = plugin.language
if delete_cache:
from django.core.cache import cache
key = placeholder.get_cache_key(language)
cache.delete(key)
attached_model = placeholder._get_attached_model()
if attached_model is Page:
Title.objects.filter(page=placeholder.page, language=language).update(publisher_state=PUBLISHER_STATE_DIRTY)
elif attached_model is StaticPlaceholder:
StaticPlaceholder.objects.filter(draft=placeholder).update(dirty=True)
def pre_save_plugins(**kwargs):
plugin = kwargs['instance']
if hasattr(plugin, '_no_reorder'):
return
set_dirty(plugin)
if plugin.pk:
try:
old_plugin = CMSPlugin.objects.get(pk=plugin.pk)
except CMSPlugin.DoesNotExist:
pass
else:
if old_plugin.placeholder_id != plugin.placeholder_id:
set_dirty(old_plugin, delete_cache=False)
def pre_delete_plugins(**kwargs):
plugin = kwargs['instance']
if hasattr(plugin, '_no_reorder'):
return
set_dirty(plugin)
def post_delete_plugins(**kwargs):
plugin = kwargs['instance']
if hasattr(plugin, '_no_reorder'):
return
plugins = CMSPlugin.objects.filter(language=plugin.language, placeholder=plugin.placeholder_id,
parent=plugin.parent_id).order_by('position')
for pos, p in enumerate(plugins):
if p.position != pos:
p.position = pos
p.save()
| bsd-3-clause |
hefen1/chromium | tools/telemetry/telemetry/core/web_contents.py | 2 | 6605 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
DEFAULT_WEB_CONTENTS_TIMEOUT = 90
# TODO(achuith, dtu, nduca): Add unit tests specifically for WebContents,
# independent of Tab.
class WebContents(object):
"""Represents web contents in the browser"""
def __init__(self, inspector_backend):
self._inspector_backend = inspector_backend
with open(os.path.join(os.path.dirname(__file__),
'network_quiescence.js')) as f:
self._quiescence_js = f.read()
@property
def id(self):
"""Return the unique id string for this tab object."""
return self._inspector_backend.id
def WaitForDocumentReadyStateToBeComplete(self,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
self.WaitForJavaScriptExpression(
'document.readyState == "complete"', timeout)
def WaitForDocumentReadyStateToBeInteractiveOrBetter(self,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
self.WaitForJavaScriptExpression(
'document.readyState == "interactive" || '
'document.readyState == "complete"', timeout)
def WaitForJavaScriptExpression(self, expr, timeout):
"""Waits for the given JavaScript expression to be True.
This method is robust against any given Evaluation timing out.
"""
def IsJavaScriptExpressionTrue():
try:
return bool(self.EvaluateJavaScript(expr))
except util.TimeoutException:
# If the main thread is busy for longer than Evaluate's timeout, we
# may time out here early. Instead, we want to wait for the full
# timeout of this method.
return False
try:
util.WaitFor(IsJavaScriptExpressionTrue, timeout)
except util.TimeoutException as e:
# Try to make timeouts a little more actionable by dumping |this|.
raise util.TimeoutException(e.message + self.EvaluateJavaScript("""
(function() {
var error = '\\n\\nJavaScript |this|:\\n';
for (name in this) {
try {
error += '\\t' + name + ': ' + this[name] + '\\n';
} catch (e) {
error += '\\t' + name + ': ???\\n';
}
}
if (window && window.document) {
error += '\\n\\nJavaScript window.document:\\n';
for (name in window.document) {
try {
error += '\\t' + name + ': ' + window.document[name] + '\\n';
} catch (e) {
error += '\\t' + name + ': ???\\n';
}
}
}
return error;
})();
"""))
def HasReachedQuiescence(self):
"""Determine whether the page has reached quiescence after loading.
Returns:
True if 2 seconds have passed since the last resource was received, False
otherwise."""
# Inclusion of the script that provides
# window.__telemetry_testHasReachedNetworkQuiescence()
# is idempotent; it's run on every call because WebContents doesn't track
# page loads and we need to execute anew for every newly loaded page.
has_reached_quiescence = (
self.EvaluateJavaScript(self._quiescence_js +
"window.__telemetry_testHasReachedNetworkQuiescence()"))
return has_reached_quiescence
def ExecuteJavaScript(self, statement, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Executes statement in JavaScript. Does not return the result.
If the statement failed to evaluate, EvaluateException will be raised.
"""
return self.ExecuteJavaScriptInContext(
statement, context_id=None, timeout=timeout)
def EvaluateJavaScript(self, expr, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Evalutes expr in JavaScript and returns the JSONized result.
Consider using ExecuteJavaScript for cases where the result of the
expression is not needed.
If evaluation throws in JavaScript, a Python EvaluateException will
be raised.
If the result of the evaluation cannot be JSONized, then an
EvaluateException will be raised.
"""
return self.EvaluateJavaScriptInContext(
expr, context_id=None, timeout=timeout)
def ExecuteJavaScriptInContext(self, expr, context_id,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Similar to ExecuteJavaScript, except context_id can refer to an iframe.
The main page has context_id=1, the first iframe context_id=2, etc.
"""
return self._inspector_backend.ExecuteJavaScript(
expr, context_id=context_id, timeout=timeout)
def EvaluateJavaScriptInContext(self, expr, context_id,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Similar to ExecuteJavaScript, except context_id can refer to an iframe.
The main page has context_id=1, the first iframe context_id=2, etc.
"""
return self._inspector_backend.EvaluateJavaScript(
expr, context_id=context_id, timeout=timeout)
def EnableAllContexts(self):
"""Enable all contexts in a page. Returns the number of available contexts.
"""
return self._inspector_backend.EnableAllContexts()
def WaitForNavigate(self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Waits for the navigation to complete.
The current page is expect to be in a navigation.
This function returns when the navigation is complete or when
the timeout has been exceeded.
"""
self._inspector_backend.WaitForNavigate(timeout)
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Navigates to url.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
self._inspector_backend.Navigate(url, script_to_evaluate_on_commit, timeout)
@property
def message_output_stream(self):
return self._inspector_backend.message_output_stream
@message_output_stream.setter
def message_output_stream(self, stream):
self._inspector_backend.message_output_stream = stream
@property
def timeline_model(self):
return self._inspector_backend.timeline_model
def StartTimelineRecording(self):
self._inspector_backend.StartTimelineRecording()
def StopTimelineRecording(self):
self._inspector_backend.StopTimelineRecording()
def TakeJSHeapSnapshot(self, timeout=120):
return self._inspector_backend.TakeJSHeapSnapshot(timeout)
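# Illustrative sketch (added; not part of the Chromium sources): typical use
# of this class from a Telemetry test, where `tab` is a hypothetical
# WebContents-derived object obtained from a browser backend.
#
#   tab.Navigate('http://example.com/')
#   tab.WaitForDocumentReadyStateToBeComplete()
#   title = tab.EvaluateJavaScript('document.title')
#   tab.WaitForJavaScriptExpression('window.__done === true', timeout=30)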
| bsd-3-clause |
Designist/sympy | sympy/polys/specialpolys.py | 87 | 9787 | """Functions for generating interesting polynomials, e.g. for benchmarking. """
from __future__ import print_function, division
from sympy.core import Add, Mul, Symbol, sympify, Dummy, symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.core.singleton import S
from sympy.polys.polytools import Poly, PurePoly
from sympy.polys.polyutils import _analyze_gens
from sympy.polys.polyclasses import DMP
from sympy.polys.densebasic import (
dmp_zero, dmp_one, dmp_ground,
dup_from_raw_dict, dmp_raise, dup_random
)
from sympy.polys.densearith import (
dmp_add_term, dmp_neg, dmp_mul, dmp_sqr
)
from sympy.polys.factortools import (
dup_zz_cyclotomic_poly
)
from sympy.polys.domains import ZZ
from sympy.ntheory import nextprime
from sympy.utilities import subsets, public
from sympy.core.compatibility import range
@public
def swinnerton_dyer_poly(n, x=None, **args):
"""Generates n-th Swinnerton-Dyer polynomial in `x`. """
from .numberfields import minimal_polynomial
if n <= 0:
raise ValueError(
"can't generate Swinnerton-Dyer polynomial of order %s" % n)
if x is not None:
sympify(x)
else:
x = Dummy('x')
if n > 3:
p = 2
a = [sqrt(2)]
for i in range(2, n + 1):
p = nextprime(p)
a.append(sqrt(p))
return minimal_polynomial(Add(*a), x, polys=args.get('polys', False))
if n == 1:
ex = x**2 - 2
elif n == 2:
ex = x**4 - 10*x**2 + 1
elif n == 3:
ex = x**8 - 40*x**6 + 352*x**4 - 960*x**2 + 576
if not args.get('polys', False):
return ex
else:
return PurePoly(ex, x)
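# Illustrative sketch (added for exposition, not part of the original module):
# the low-order Swinnerton-Dyer polynomials are minimal polynomials of sums of
# square roots of primes, e.g. n=2 corresponds to sqrt(2) + sqrt(3).
def _swinnerton_dyer_examples():
    from sympy.abc import x
    p1 = swinnerton_dyer_poly(1, x)  # x**2 - 2
    p2 = swinnerton_dyer_poly(2, x)  # x**4 - 10*x**2 + 1
    return p1, p2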
@public
def cyclotomic_poly(n, x=None, **args):
"""Generates cyclotomic polynomial of order `n` in `x`. """
if n <= 0:
raise ValueError(
"can't generate cyclotomic polynomial of order %s" % n)
poly = DMP(dup_zz_cyclotomic_poly(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
if not args.get('polys', False):
return poly.as_expr()
else:
return poly
@public
def symmetric_poly(n, *gens, **args):
"""Generates symmetric polynomial of order `n`. """
gens = _analyze_gens(gens)
if n < 0 or n > len(gens) or not gens:
raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
elif not n:
poly = S.One
else:
poly = Add(*[ Mul(*s) for s in subsets(gens, int(n)) ])
if not args.get('polys', False):
return poly
else:
return Poly(poly, *gens)
@public
def random_poly(x, n, inf, sup, domain=ZZ, polys=False):
"""Return a polynomial of degree ``n`` with coefficients in ``[inf, sup]``. """
poly = Poly(dup_random(n, inf, sup, domain), x, domain=domain)
if not polys:
return poly.as_expr()
else:
return poly
@public
def interpolating_poly(n, x, X='x', Y='y'):
"""Construct Lagrange interpolating polynomial for ``n`` data points. """
if isinstance(X, str):
X = symbols("%s:%s" % (X, n))
if isinstance(Y, str):
Y = symbols("%s:%s" % (Y, n))
coeffs = []
for i in range(0, n):
numer = []
denom = []
for j in range(0, n):
if i == j:
continue
numer.append(x - X[j])
denom.append(X[i] - X[j])
numer = Mul(*numer)
denom = Mul(*denom)
coeffs.append(numer/denom)
return Add(*[ coeff*y for coeff, y in zip(coeffs, Y) ])
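# Illustrative sketch (added for exposition): concrete data points can be
# passed directly instead of the symbolic X/Y generators. Assuming the three
# points (0, 1), (1, 3) and (2, 2), the quadratic through them is recovered.
def _interpolating_poly_example():
    from sympy.abc import x
    p = interpolating_poly(3, x, X=(0, 1, 2), Y=(1, 3, 2))
    return p.expand()  # -3*x**2/2 + 7*x/2 + 1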
def fateman_poly_F_1(n):
"""Fateman's GCD benchmark: trivial GCD """
Y = [ Symbol('y_' + str(i)) for i in range(0, n + 1) ]
y_0, y_1 = Y[0], Y[1]
u = y_0 + Add(*[ y for y in Y[1:] ])
v = y_0**2 + Add(*[ y**2 for y in Y[1:] ])
F = ((u + 1)*(u + 2)).as_poly(*Y)
G = ((v + 1)*(-3*y_1*y_0**2 + y_1**2 - 1)).as_poly(*Y)
H = Poly(1, *Y)
return F, G, H
def dmp_fateman_poly_F_1(n, K):
"""Fateman's GCD benchmark: trivial GCD """
u = [K(1), K(0)]
for i in range(0, n):
u = [dmp_one(i, K), u]
v = [K(1), K(0), K(0)]
for i in range(0, n):
v = [dmp_one(i, K), dmp_zero(i), v]
m = n - 1
U = dmp_add_term(u, dmp_ground(K(1), m), 0, n, K)
V = dmp_add_term(u, dmp_ground(K(2), m), 0, n, K)
f = [[-K(3), K(0)], [], [K(1), K(0), -K(1)]]
W = dmp_add_term(v, dmp_ground(K(1), m), 0, n, K)
Y = dmp_raise(f, m, 1, K)
F = dmp_mul(U, V, n, K)
G = dmp_mul(W, Y, n, K)
H = dmp_one(n, K)
return F, G, H
def fateman_poly_F_2(n):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
Y = [ Symbol('y_' + str(i)) for i in range(0, n + 1) ]
y_0 = Y[0]
u = Add(*[ y for y in Y[1:] ])
H = Poly((y_0 + u + 1)**2, *Y)
F = Poly((y_0 - u - 2)**2, *Y)
G = Poly((y_0 + u + 2)**2, *Y)
return H*F, H*G, H
def dmp_fateman_poly_F_2(n, K):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
u = [K(1), K(0)]
for i in range(0, n - 1):
u = [dmp_one(i, K), u]
m = n - 1
v = dmp_add_term(u, dmp_ground(K(2), m - 1), 0, n, K)
f = dmp_sqr([dmp_one(m, K), dmp_neg(v, m, K)], n, K)
g = dmp_sqr([dmp_one(m, K), v], n, K)
v = dmp_add_term(u, dmp_one(m - 1, K), 0, n, K)
h = dmp_sqr([dmp_one(m, K), v], n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
def fateman_poly_F_3(n):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
Y = [ Symbol('y_' + str(i)) for i in range(0, n + 1) ]
y_0 = Y[0]
u = Add(*[ y**(n + 1) for y in Y[1:] ])
H = Poly((y_0**(n + 1) + u + 1)**2, *Y)
F = Poly((y_0**(n + 1) - u - 2)**2, *Y)
G = Poly((y_0**(n + 1) + u + 2)**2, *Y)
return H*F, H*G, H
def dmp_fateman_poly_F_3(n, K):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
u = dup_from_raw_dict({n + 1: K.one}, K)
for i in range(0, n - 1):
u = dmp_add_term([u], dmp_one(i, K), n + 1, i + 1, K)
v = dmp_add_term(u, dmp_ground(K(2), n - 2), 0, n, K)
f = dmp_sqr(
dmp_add_term([dmp_neg(v, n - 1, K)], dmp_one(n - 1, K), n + 1, n, K), n, K)
g = dmp_sqr(dmp_add_term([v], dmp_one(n - 1, K), n + 1, n, K), n, K)
v = dmp_add_term(u, dmp_one(n - 2, K), 0, n - 1, K)
h = dmp_sqr(dmp_add_term([v], dmp_one(n - 1, K), n + 1, n, K), n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
# A few useful polynomials from Wang's paper ('78).
from sympy.polys.rings import ring
def _f_0():
R, x, y, z = ring("x,y,z", ZZ)
return x**2*y*z**2 + 2*x**2*y*z + 3*x**2*y + 2*x**2 + 3*x + 4*y**2*z**2 + 5*y**2*z + 6*y**2 + y*z**2 + 2*y*z + y + 1
def _f_1():
R, x, y, z = ring("x,y,z", ZZ)
return x**3*y*z + x**2*y**2*z**2 + x**2*y**2 + 20*x**2*y*z + 30*x**2*y + x**2*z**2 + 10*x**2*z + x*y**3*z + 30*x*y**2*z + 20*x*y**2 + x*y*z**3 + 10*x*y*z**2 + x*y*z + 610*x*y + 20*x*z**2 + 230*x*z + 300*x + y**2*z**2 + 10*y**2*z + 30*y*z**2 + 320*y*z + 200*y + 600*z + 6000
def _f_2():
R, x, y, z = ring("x,y,z", ZZ)
return x**5*y**3 + x**5*y**2*z + x**5*y*z**2 + x**5*z**3 + x**3*y**2 + x**3*y*z + 90*x**3*y + 90*x**3*z + x**2*y**2*z - 11*x**2*y**2 + x**2*z**3 - 11*x**2*z**2 + y*z - 11*y + 90*z - 990
def _f_3():
R, x, y, z = ring("x,y,z", ZZ)
return x**5*y**2 + x**4*z**4 + x**4 + x**3*y**3*z + x**3*z + x**2*y**4 + x**2*y**3*z**3 + x**2*y*z**5 + x**2*y*z + x*y**2*z**4 + x*y**2 + x*y*z**7 + x*y*z**3 + x*y*z**2 + y**2*z + y*z**4
def _f_4():
R, x, y, z = ring("x,y,z", ZZ)
return -x**9*y**8*z - x**8*y**5*z**3 - x**7*y**12*z**2 - 5*x**7*y**8 - x**6*y**9*z**4 + x**6*y**7*z**3 + 3*x**6*y**7*z - 5*x**6*y**5*z**2 - x**6*y**4*z**3 + x**5*y**4*z**5 + 3*x**5*y**4*z**3 - x**5*y*z**5 + x**4*y**11*z**4 + 3*x**4*y**11*z**2 - x**4*y**8*z**4 + 5*x**4*y**7*z**2 + 15*x**4*y**7 - 5*x**4*y**4*z**2 + x**3*y**8*z**6 + 3*x**3*y**8*z**4 - x**3*y**5*z**6 + 5*x**3*y**4*z**4 + 15*x**3*y**4*z**2 + x**3*y**3*z**5 + 3*x**3*y**3*z**3 - 5*x**3*y*z**4 + x**2*z**7 + 3*x**2*z**5 + x*y**7*z**6 + 3*x*y**7*z**4 + 5*x*y**3*z**4 + 15*x*y**3*z**2 + y**4*z**8 + 3*y**4*z**6 + 5*z**6 + 15*z**4
def _f_5():
R, x, y, z = ring("x,y,z", ZZ)
return -x**3 - 3*x**2*y + 3*x**2*z - 3*x*y**2 + 6*x*y*z - 3*x*z**2 - y**3 + 3*y**2*z - 3*y*z**2 + z**3
def _f_6():
R, x, y, z, t = ring("x,y,z,t", ZZ)
return 2115*x**4*y + 45*x**3*z**3*t**2 - 45*x**3*t**2 - 423*x*y**4 - 47*x*y**3 + 141*x*y*z**3 + 94*x*y*z*t - 9*y**3*z**3*t**2 + 9*y**3*t**2 - y**2*z**3*t**2 + y**2*t**2 + 3*z**6*t**2 + 2*z**4*t**3 - 3*z**3*t**2 - 2*z*t**3
def _w_1():
R, x, y, z = ring("x,y,z", ZZ)
return 4*x**6*y**4*z**2 + 4*x**6*y**3*z**3 - 4*x**6*y**2*z**4 - 4*x**6*y*z**5 + x**5*y**4*z**3 + 12*x**5*y**3*z - x**5*y**2*z**5 + 12*x**5*y**2*z**2 - 12*x**5*y*z**3 - 12*x**5*z**4 + 8*x**4*y**4 + 6*x**4*y**3*z**2 + 8*x**4*y**3*z - 4*x**4*y**2*z**4 + 4*x**4*y**2*z**3 - 8*x**4*y**2*z**2 - 4*x**4*y*z**5 - 2*x**4*y*z**4 - 8*x**4*y*z**3 + 2*x**3*y**4*z + x**3*y**3*z**3 - x**3*y**2*z**5 - 2*x**3*y**2*z**3 + 9*x**3*y**2*z - 12*x**3*y*z**3 + 12*x**3*y*z**2 - 12*x**3*z**4 + 3*x**3*z**3 + 6*x**2*y**3 - 6*x**2*y**2*z**2 + 8*x**2*y**2*z - 2*x**2*y*z**4 - 8*x**2*y*z**3 + 2*x**2*y*z**2 + 2*x*y**3*z - 2*x*y**2*z**3 - 3*x*y*z + 3*x*z**3 - 2*y**2 + 2*y*z**2
def _w_2():
R, x, y = ring("x,y", ZZ)
return 24*x**8*y**3 + 48*x**8*y**2 + 24*x**7*y**5 - 72*x**7*y**2 + 25*x**6*y**4 + 2*x**6*y**3 + 4*x**6*y + 8*x**6 + x**5*y**6 + x**5*y**3 - 12*x**5 + x**4*y**5 - x**4*y**4 - 2*x**4*y**3 + 292*x**4*y**2 - x**3*y**6 + 3*x**3*y**3 - x**2*y**5 + 12*x**2*y**3 + 48*x**2 - 12*y**3
def f_polys():
return _f_0(), _f_1(), _f_2(), _f_3(), _f_4(), _f_5(), _f_6()
def w_polys():
return _w_1(), _w_2()
| bsd-3-clause |
tboyce021/home-assistant | homeassistant/components/flexit/climate.py | 16 | 5180 | """Platform for Flexit AC units with CI66 Modbus adapter."""
import logging
from typing import List
from pyflexit.pyflexit import pyflexit
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.components.modbus.const import CONF_HUB, DEFAULT_HUB, MODBUS_DOMAIN
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_NAME,
CONF_SLAVE,
DEVICE_DEFAULT_NAME,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(CONF_SLAVE): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
}
)
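# Illustrative sketch (added for exposition): the schema above corresponds to
# YAML configuration of roughly this shape (the slave/name values are
# placeholders):
#
#   climate:
#     - platform: flexit
#       slave: 21
#       name: Flexit ventilation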
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Flexit Platform."""
modbus_slave = config.get(CONF_SLAVE)
name = config.get(CONF_NAME)
hub = hass.data[MODBUS_DOMAIN][config.get(CONF_HUB)]
add_entities([Flexit(hub, modbus_slave, name)], True)
class Flexit(ClimateEntity):
"""Representation of a Flexit AC unit."""
def __init__(self, hub, modbus_slave, name):
"""Initialize the unit."""
self._hub = hub
self._name = name
self._slave = modbus_slave
self._target_temperature = None
self._current_temperature = None
self._current_fan_mode = None
self._current_operation = None
self._fan_modes = ["Off", "Low", "Medium", "High"]
self._current_operation = None
self._filter_hours = None
self._filter_alarm = None
self._heat_recovery = None
self._heater_enabled = False
self._heating = None
self._cooling = None
self._alarm = False
self.unit = pyflexit(hub, modbus_slave)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def update(self):
"""Update unit attributes."""
if not self.unit.update():
_LOGGER.warning("Modbus read failed")
self._target_temperature = self.unit.get_target_temp
self._current_temperature = self.unit.get_temp
self._current_fan_mode = self._fan_modes[self.unit.get_fan_speed]
self._filter_hours = self.unit.get_filter_hours
# Mechanical heat recovery, 0-100%
self._heat_recovery = self.unit.get_heat_recovery
# Heater active 0-100%
self._heating = self.unit.get_heating
# Cooling active 0-100%
self._cooling = self.unit.get_cooling
# Filter alarm 0/1
self._filter_alarm = self.unit.get_filter_alarm
# Heater enabled or not. Does not mean it's necessarily heating
self._heater_enabled = self.unit.get_heater_enabled
# Current operation mode
self._current_operation = self.unit.get_operation
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
"filter_hours": self._filter_hours,
"filter_alarm": self._filter_alarm,
"heat_recovery": self._heat_recovery,
"heating": self._heating,
"heater_enabled": self._heater_enabled,
"cooling": self._cooling,
}
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Needs to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_COOL]
@property
def fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._fan_modes
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self.unit.set_temp(self._target_temperature)
def set_fan_mode(self, fan_mode):
"""Set new fan mode."""
self.unit.set_fan_speed(self._fan_modes.index(fan_mode))
| apache-2.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/cryptography/hazmat/primitives/ciphers/algorithms.py | 3 | 4150 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.hazmat.primitives.ciphers import (
BlockCipherAlgorithm, CipherAlgorithm
)
from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce
def _verify_key_size(algorithm, key):
# Verify that the key size matches the expected key size
if len(key) * 8 not in algorithm.key_sizes:
raise ValueError("Invalid key size ({0}) for {1}.".format(
len(key) * 8, algorithm.name
))
return key
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class AES(object):
name = "AES"
block_size = 128
# 512 added to support AES-256-XTS, which uses 512-bit keys
key_sizes = frozenset([128, 192, 256, 512])
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class Camellia(object):
name = "camellia"
block_size = 128
key_sizes = frozenset([128, 192, 256])
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class TripleDES(object):
name = "3DES"
block_size = 64
key_sizes = frozenset([64, 128, 192])
def __init__(self, key):
if len(key) == 8:
key += key + key
elif len(key) == 16:
key += key[:8]
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
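# Illustrative sketch (added for exposition, not part of the original module):
# TripleDES above widens 8- and 16-byte keys to 24 bytes (K1|K1|K1 and
# K1|K2|K1), so every keying option reports a 192-bit key_size. The key bytes
# below are placeholders only.
def _triple_des_key_expansion_demo():
    one_key = TripleDES(b"\x00" * 8)     # expanded to K1|K1|K1 (DES-equivalent)
    two_key = TripleDES(b"\x01" * 16)    # expanded to K1|K2|K1
    three_key = TripleDES(b"\x02" * 24)  # used as K1|K2|K3
    return one_key.key_size, two_key.key_size, three_key.key_size  # (192, 192, 192)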
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class Blowfish(object):
name = "Blowfish"
block_size = 64
key_sizes = frozenset(range(32, 449, 8))
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class CAST5(object):
name = "CAST5"
block_size = 64
key_sizes = frozenset(range(40, 129, 8))
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(CipherAlgorithm)
class ARC4(object):
name = "RC4"
key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(CipherAlgorithm)
class IDEA(object):
name = "IDEA"
block_size = 64
key_sizes = frozenset([128])
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(BlockCipherAlgorithm)
@utils.register_interface(CipherAlgorithm)
class SEED(object):
name = "SEED"
block_size = 128
key_sizes = frozenset([128])
def __init__(self, key):
self.key = _verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
@utils.register_interface(CipherAlgorithm)
@utils.register_interface(ModeWithNonce)
class ChaCha20(object):
name = "ChaCha20"
key_sizes = frozenset([256])
def __init__(self, key, nonce):
self.key = _verify_key_size(self, key)
if not isinstance(nonce, bytes):
raise TypeError("nonce must be bytes")
if len(nonce) != 16:
raise ValueError("nonce must be 128-bits (16 bytes)")
self._nonce = nonce
nonce = utils.read_only_property("_nonce")
@property
def key_size(self):
return len(self.key) * 8
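# Illustrative sketch (added for exposition): constructing the algorithm with
# the sizes enforced above -- a 256-bit key and a 128-bit nonce (typically the
# 64-bit block counter and 64-bit nonce concatenated). All-zero values are
# placeholders; never reuse a real key/nonce pair.
def _chacha20_construction_demo():
    key = b"\x00" * 32    # 256 bits
    nonce = b"\x00" * 16  # 128 bits, as required by the check above
    return ChaCha20(key, nonce).key_size  # 256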
| bsd-3-clause |
aYukiSekiguchi/ACCESS-Chromium | third_party/mesa/MesaLib/src/mapi/glapi/gen/glX_proto_size.py | 33 | 18969 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML, glX_XML
import license
import sys, getopt, copy, string
class glx_enum_function:
def __init__(self, func_name, enum_dict):
self.name = func_name
self.mode = 1
self.sig = None
# "enums" is a set of lists. The element in the set is the
# value of the enum. The list is the list of names for that
# value. For example, [0x8126] = {"POINT_SIZE_MIN",
# "POINT_SIZE_MIN_ARB", "POINT_SIZE_MIN_EXT",
# "POINT_SIZE_MIN_SGIS"}.
self.enums = {}
# "count" is indexed by count values. Each element of count
# is a list of index to "enums" that have that number of
# associated data elements. For example, [4] =
# {GL_AMBIENT, GL_DIFFUSE, GL_SPECULAR, GL_EMISSION,
# GL_AMBIENT_AND_DIFFUSE} (the enum names are used here,
# but the actual hexadecimal values would be in the array).
self.count = {}
# Fill self.count and self.enums using the dictionary of enums
# that was passed in. The generic Get functions (e.g.,
# GetBooleanv and friends) are handled specially here. In
# the data the generic Get functions are refered to as "Get".
if func_name in ["GetIntegerv", "GetBooleanv", "GetFloatv", "GetDoublev"]:
match_name = "Get"
else:
match_name = func_name
mode_set = 0
for enum_name in enum_dict:
e = enum_dict[ enum_name ]
if e.functions.has_key( match_name ):
[count, mode] = e.functions[ match_name ]
if mode_set and mode != self.mode:
raise RuntimeError("Not all enums for %s have the same mode." % (func_name))
self.mode = mode
if self.enums.has_key( e.value ):
if e.name not in self.enums[ e.value ]:
self.enums[ e.value ].append( e )
else:
if not self.count.has_key( count ):
self.count[ count ] = []
self.enums[ e.value ] = [ e ]
self.count[ count ].append( e.value )
return
def signature( self ):
if self.sig == None:
self.sig = ""
for i in self.count:
if i == None:
raise RuntimeError("i is None. WTF?")
self.count[i].sort()
for e in self.count[i]:
self.sig += "%04x,%d," % (e, i)
return self.sig
def is_set( self ):
return self.mode
def PrintUsingTable(self):
"""Emit the body of the __gl*_size function using a pair
of look-up tables and a mask. The mask is calculated such
that (e & mask) is unique for all the valid values of e for
this function. The result of (e & mask) is used as an index
into the first look-up table. If it matches e, then the
same entry of the second table is returned. Otherwise zero
is returned.
It seems like this should cause better code to be generated.
However, on x86 at least, the resulting .o file is about 20%
larger than the switch-statement version. I am leaving this
code in because the results may be different on other
platforms (e.g., PowerPC or x86-64)."""
return 0
count = 0
for a in self.enums:
count += 1
if self.count.has_key(-1):
return 0
# Determine if there is some mask M, such that M = (2^N) - 1,
# that will generate unique values for all of the enums.
mask = 0
for i in [1, 2, 3, 4, 5, 6, 7, 8]:
mask = (1 << i) - 1
fail = 0;
for a in self.enums:
for b in self.enums:
if a != b:
if (a & mask) == (b & mask):
fail = 1;
if not fail:
break;
else:
mask = 0
if (mask != 0) and (mask < (2 * count)):
masked_enums = {}
masked_count = {}
for i in range(0, mask + 1):
masked_enums[i] = "0";
masked_count[i] = 0;
for c in self.count:
for e in self.count[c]:
i = e & mask
enum_obj = self.enums[e][0]
masked_enums[i] = '0x%04x /* %s */' % (e, enum_obj.name )
masked_count[i] = c
print ' static const GLushort a[%u] = {' % (mask + 1)
for e in masked_enums:
print ' %s, ' % (masked_enums[e])
print ' };'
print ' static const GLubyte b[%u] = {' % (mask + 1)
for c in masked_count:
print ' %u, ' % (masked_count[c])
print ' };'
print ' const unsigned idx = (e & 0x%02xU);' % (mask)
print ''
print ' return (e == a[idx]) ? (GLint) b[idx] : 0;'
return 1;
else:
return 0;
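# Illustrative sketch (added for exposition): the search above, stripped to a
# stand-alone helper -- find the smallest mask M = (2^N) - 1 under which all
# enum values remain distinct (the enum values below are made up):
#
#   def _find_perfect_mask(values):
#       for i in range(1, 9):
#           mask = (1 << i) - 1
#           if len(set(v & mask for v in values)) == len(values):
#               return mask
#       return 0
#
#   _find_perfect_mask([0x8126, 0x8127, 0x0B60])  # -> smallest usable mask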
def PrintUsingSwitch(self, name):
"""Emit the body of the __gl*_size function using a
switch-statement."""
print ' switch( e ) {'
for c in self.count:
for e in self.count[c]:
first = 1
# There may be multiple enums with the same
# value. This happens as extensions are
# promoted from vendor-specific or EXT to
# ARB and to the core. Emit the first one as
# a case label, and emit the others as
# commented-out case labels.
list = {}
for enum_obj in self.enums[e]:
list[ enum_obj.priority() ] = enum_obj.name
keys = list.keys()
keys.sort()
for k in keys:
j = list[k]
if first:
print ' case GL_%s:' % (j)
first = 0
else:
print '/* case GL_%s:*/' % (j)
if c == -1:
print ' return __gl%s_variable_size( e );' % (name)
else:
print ' return %u;' % (c)
print ' default: return 0;'
print ' }'
def Print(self, name):
print 'INTERNAL PURE FASTCALL GLint'
print '__gl%s_size( GLenum e )' % (name)
print '{'
if not self.PrintUsingTable():
self.PrintUsingSwitch(name)
print '}'
print ''
class glx_server_enum_function(glx_enum_function):
def __init__(self, func, enum_dict):
glx_enum_function.__init__(self, func.name, enum_dict)
self.function = func
return
def signature( self ):
if self.sig == None:
sig = glx_enum_function.signature(self)
p = self.function.variable_length_parameter()
if p:
sig += "%u" % (p.size())
self.sig = sig
return self.sig;
def Print(self, name, printer):
f = self.function
printer.common_func_print_just_header( f )
fixup = []
foo = {}
for param_name in f.count_parameter_list:
o = f.offset_of( param_name )
foo[o] = param_name
for param_name in f.counter_list:
o = f.offset_of( param_name )
foo[o] = param_name
keys = foo.keys()
keys.sort()
for o in keys:
p = f.parameters_by_name[ foo[o] ]
printer.common_emit_one_arg(p, "pc", 0)
fixup.append( p.name )
print ' GLsizei compsize;'
print ''
printer.common_emit_fixups(fixup)
print ''
print ' compsize = __gl%s_size(%s);' % (f.name, string.join(f.count_parameter_list, ","))
p = f.variable_length_parameter()
print ' return __GLX_PAD(%s);' % (p.size_string())
print '}'
print ''
class PrintGlxSizeStubs_common(gl_XML.gl_print_base):
do_get = (1 << 0)
do_set = (1 << 1)
def __init__(self, which_functions):
gl_XML.gl_print_base.__init__(self)
self.name = "glX_proto_size.py (from Mesa)"
self.license = license.bsd_license_template % ( "(C) Copyright IBM Corporation 2004", "IBM")
self.emit_set = ((which_functions & PrintGlxSizeStubs_common.do_set) != 0)
self.emit_get = ((which_functions & PrintGlxSizeStubs_common.do_get) != 0)
return
class PrintGlxSizeStubs_c(PrintGlxSizeStubs_common):
def printRealHeader(self):
print ''
print '#include <GL/gl.h>'
if self.emit_get:
print '#include "indirect_size_get.h"'
print '#include "glxserver.h"'
print '#include "indirect_util.h"'
print '#include "indirect_size.h"'
print ''
self.printPure()
print ''
self.printFastcall()
print ''
self.printVisibility( "INTERNAL", "internal" )
print ''
print ''
print '#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(GLX_USE_APPLEGL)'
print '# undef HAVE_ALIAS'
print '#endif'
print '#ifdef HAVE_ALIAS'
print '# define ALIAS2(from,to) \\'
print ' INTERNAL PURE FASTCALL GLint __gl ## from ## _size( GLenum e ) \\'
print ' __attribute__ ((alias( # to )));'
print '# define ALIAS(from,to) ALIAS2( from, __gl ## to ## _size )'
print '#else'
print '# define ALIAS(from,to) \\'
print ' INTERNAL PURE FASTCALL GLint __gl ## from ## _size( GLenum e ) \\'
print ' { return __gl ## to ## _size( e ); }'
print '#endif'
print ''
print ''
def printBody(self, api):
enum_sigs = {}
aliases = []
for func in api.functionIterateGlx():
ef = glx_enum_function( func.name, api.enums_by_name )
if len(ef.enums) == 0:
continue
if (ef.is_set() and self.emit_set) or (not ef.is_set() and self.emit_get):
sig = ef.signature()
if enum_sigs.has_key( sig ):
aliases.append( [func.name, enum_sigs[ sig ]] )
else:
enum_sigs[ sig ] = func.name
ef.Print( func.name )
for [alias_name, real_name] in aliases:
print 'ALIAS( %s, %s )' % (alias_name, real_name)
class PrintGlxSizeStubs_h(PrintGlxSizeStubs_common):
def printRealHeader(self):
print """/**
* \\file
* Prototypes for functions used to determine the number of data elements in
* various GLX protocol messages.
*
* \\author Ian Romanick <idr@us.ibm.com>
*/
"""
self.printPure();
print ''
self.printFastcall();
print ''
self.printVisibility( "INTERNAL", "internal" );
print ''
def printBody(self, api):
for func in api.functionIterateGlx():
ef = glx_enum_function( func.name, api.enums_by_name )
if len(ef.enums) == 0:
continue
if (ef.is_set() and self.emit_set) or (not ef.is_set() and self.emit_get):
print 'extern INTERNAL PURE FASTCALL GLint __gl%s_size(GLenum);' % (func.name)
class PrintGlxReqSize_common(gl_XML.gl_print_base):
"""Common base class for PrintGlxSizeReq_h and PrintGlxSizeReq_h.
The main purpose of this common base class is to provide the infrastructure
for the derived classes to iterate over the same set of functions.
"""
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "glX_proto_size.py (from Mesa)"
self.license = license.bsd_license_template % ( "(C) Copyright IBM Corporation 2005", "IBM")
class PrintGlxReqSize_h(PrintGlxReqSize_common):
def __init__(self):
PrintGlxReqSize_common.__init__(self)
self.header_tag = "_INDIRECT_REQSIZE_H_"
def printRealHeader(self):
self.printVisibility("HIDDEN", "hidden")
print ''
self.printPure()
print ''
def printBody(self, api):
for func in api.functionIterateGlx():
if not func.ignore and func.has_variable_size_request():
print 'extern PURE HIDDEN int __glX%sReqSize(const GLbyte *pc, Bool swap);' % (func.name)
class PrintGlxReqSize_c(PrintGlxReqSize_common):
"""Create the server-side 'request size' functions.
Create the server-side functions that are used to determine what the
size of a variable length command should be. The server then uses
this value to determine if the incoming command packet is malformed.
"""
def __init__(self):
PrintGlxReqSize_common.__init__(self)
self.counter_sigs = {}
def printRealHeader(self):
print ''
print '#include <GL/gl.h>'
print '#include "glxserver.h"'
print '#include "glxbyteorder.h"'
print '#include "indirect_size.h"'
print '#include "indirect_reqsize.h"'
print ''
print '#define __GLX_PAD(x) (((x) + 3) & ~3)'
print ''
print '#if defined(__CYGWIN__) || defined(__MINGW32__)'
print '# undef HAVE_ALIAS'
print '#endif'
print '#ifdef HAVE_ALIAS'
print '# define ALIAS2(from,to) \\'
print ' GLint __glX ## from ## ReqSize( const GLbyte * pc, Bool swap ) \\'
print ' __attribute__ ((alias( # to )));'
print '# define ALIAS(from,to) ALIAS2( from, __glX ## to ## ReqSize )'
print '#else'
print '# define ALIAS(from,to) \\'
print ' GLint __glX ## from ## ReqSize( const GLbyte * pc, Bool swap ) \\'
print ' { return __glX ## to ## ReqSize( pc, swap ); }'
print '#endif'
print ''
print ''
def printBody(self, api):
aliases = []
enum_functions = {}
enum_sigs = {}
for func in api.functionIterateGlx():
if not func.has_variable_size_request(): continue
ef = glx_server_enum_function( func, api.enums_by_name )
if len(ef.enums) == 0: continue
sig = ef.signature()
if not enum_functions.has_key(func.name):
enum_functions[ func.name ] = sig
if not enum_sigs.has_key( sig ):
enum_sigs[ sig ] = ef
for func in api.functionIterateGlx():
# Even though server-handcode functions are on "the
# list", and prototypes are generated for them, there
# isn't enough information to generate a size
# function. If there was enough information, they
# probably wouldn't need to be handcoded in the first
# place!
if func.server_handcode: continue
if not func.has_variable_size_request(): continue
if enum_functions.has_key(func.name):
sig = enum_functions[func.name]
ef = enum_sigs[ sig ]
if ef.name != func.name:
aliases.append( [func.name, ef.name] )
else:
ef.Print( func.name, self )
elif func.images:
self.printPixelFunction(func)
elif func.has_variable_size_request():
a = self.printCountedFunction(func)
if a: aliases.append(a)
for [alias_name, real_name] in aliases:
print 'ALIAS( %s, %s )' % (alias_name, real_name)
return
def common_emit_fixups(self, fixup):
"""Utility function to emit conditional byte-swaps."""
if fixup:
print ' if (swap) {'
for name in fixup:
print ' %s = bswap_32(%s);' % (name, name)
print ' }'
return
def common_emit_one_arg(self, p, pc, adjust):
offset = p.offset
dst = p.string()
src = '(%s *)' % (p.type_string())
print '%-18s = *%11s(%s + %u);' % (dst, src, pc, offset + adjust);
return
def common_func_print_just_header(self, f):
print 'int'
print '__glX%sReqSize( const GLbyte * pc, Bool swap )' % (f.name)
print '{'
def printPixelFunction(self, f):
self.common_func_print_just_header(f)
f.offset_of( f.parameters[0].name )
[dim, w, h, d, junk] = f.get_images()[0].get_dimensions()
print ' GLint row_length = * (GLint *)(pc + 4);'
if dim < 3:
fixup = ['row_length', 'skip_rows', 'alignment']
print ' GLint image_height = 0;'
print ' GLint skip_images = 0;'
print ' GLint skip_rows = * (GLint *)(pc + 8);'
print ' GLint alignment = * (GLint *)(pc + 16);'
else:
fixup = ['row_length', 'image_height', 'skip_rows', 'skip_images', 'alignment']
print ' GLint image_height = * (GLint *)(pc + 8);'
print ' GLint skip_rows = * (GLint *)(pc + 16);'
print ' GLint skip_images = * (GLint *)(pc + 20);'
print ' GLint alignment = * (GLint *)(pc + 32);'
img = f.images[0]
for p in f.parameterIterateGlxSend():
if p.name in [w, h, d, img.img_format, img.img_type, img.img_target]:
self.common_emit_one_arg(p, "pc", 0)
fixup.append( p.name )
print ''
self.common_emit_fixups(fixup)
if img.img_null_flag:
print ''
print ' if (*(CARD32 *) (pc + %s))' % (img.offset - 4)
print ' return 0;'
print ''
print ' return __glXImageSize(%s, %s, %s, %s, %s, %s,' % (img.img_format, img.img_type, img.img_target, w, h, d )
print ' image_height, row_length, skip_images,'
print ' skip_rows, alignment);'
print '}'
print ''
return
def printCountedFunction(self, f):
sig = ""
offset = 0
fixup = []
params = []
plus = ''
size = ''
param_offsets = {}
# Calculate the offset of each counter parameter and the
# size string for the variable length parameter(s). While
# that is being done, calculate a unique signature for this
# function.
for p in f.parameterIterateGlxSend():
if p.is_counter:
fixup.append( p.name )
params.append( p )
elif p.counter:
s = p.size()
if s == 0: s = 1
sig += "(%u,%u)" % (f.offset_of(p.counter), s)
size += '%s%s' % (plus, p.size_string())
plus = ' + '
# If the calculated signature matches a function that has
# already been emitted, don't emit this function. Instead, add
# it to the list of function aliases.
if self.counter_sigs.has_key(sig):
n = self.counter_sigs[sig];
alias = [f.name, n]
else:
alias = None
self.counter_sigs[sig] = f.name
self.common_func_print_just_header(f)
for p in params:
self.common_emit_one_arg(p, "pc", 0)
print ''
self.common_emit_fixups(fixup)
print ''
print ' return __GLX_PAD(%s);' % (size)
print '}'
print ''
return alias
def show_usage():
print "Usage: %s [-f input_file_name] -m output_mode [--only-get | --only-set] [--get-alias-set]" % sys.argv[0]
print " -m output_mode Output mode can be one of 'size_c' or 'size_h'."
print " --only-get Only emit 'get'-type functions."
print " --only-set Only emit 'set'-type functions."
print ""
print "By default, both 'get' and 'set'-type functions are emitted."
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "f:m:h:", ["only-get", "only-set", "header-tag="])
except Exception,e:
show_usage()
mode = None
header_tag = None
which_functions = PrintGlxSizeStubs_common.do_get | PrintGlxSizeStubs_common.do_set
for (arg,val) in args:
if arg == "-f":
file_name = val
elif arg == "-m":
mode = val
elif arg == "--only-get":
which_functions = PrintGlxSizeStubs_common.do_get
elif arg == "--only-set":
which_functions = PrintGlxSizeStubs_common.do_set
elif (arg == '-h') or (arg == "--header-tag"):
header_tag = val
if mode == "size_c":
printer = PrintGlxSizeStubs_c( which_functions )
elif mode == "size_h":
printer = PrintGlxSizeStubs_h( which_functions )
if header_tag:
printer.header_tag = header_tag
elif mode == "reqsize_c":
printer = PrintGlxReqSize_c()
elif mode == "reqsize_h":
printer = PrintGlxReqSize_h()
else:
show_usage()
api = gl_XML.parse_GL_API( file_name, glX_XML.glx_item_factory() )
printer.Print( api )
| bsd-3-clause |
Jericho/deep-learning | image-classification/problem_unittests.py | 91 | 7319 | import os
import numpy as np
import tensorflow as tf
import random
from unittest.mock import MagicMock
def _print_success_message():
print('Tests Passed')
def test_folder_path(cifar10_dataset_folder_path):
assert cifar10_dataset_folder_path is not None,\
'Cifar-10 data folder not set.'
assert cifar10_dataset_folder_path[-1] != '/',\
'The "/" shouldn\'t be added to the end of the path.'
assert os.path.exists(cifar10_dataset_folder_path),\
'Path not found.'
assert os.path.isdir(cifar10_dataset_folder_path),\
'{} is not a folder.'.format(os.path.basename(cifar10_dataset_folder_path))
train_files = [cifar10_dataset_folder_path + '/data_batch_' + str(batch_id) for batch_id in range(1, 6)]
other_files = [cifar10_dataset_folder_path + '/batches.meta', cifar10_dataset_folder_path + '/test_batch']
missing_files = [path for path in train_files + other_files if not os.path.exists(path)]
assert not missing_files,\
'Missing files in directory: {}'.format(missing_files)
print('All files found!')
def test_normalize(normalize):
test_shape = (np.random.choice(range(1000)), 32, 32, 3)
test_numbers = np.random.choice(range(256), test_shape)
normalize_out = normalize(test_numbers)
assert type(normalize_out).__module__ == np.__name__,\
'Not Numpy Object'
assert normalize_out.shape == test_shape,\
'Incorrect Shape. {} shape found'.format(normalize_out.shape)
assert normalize_out.max() <= 1 and normalize_out.min() >= 0,\
'Incorrect Range. {} to {} found'.format(normalize_out.min(), normalize_out.max())
_print_success_message()
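# Illustrative sketch (added for exposition, not part of the test module): a
# minimal normalize() that satisfies test_normalize, assuming 8-bit pixel
# values in [0, 255].
def _reference_normalize(x):
    # Elementwise division preserves the array's shape and maps the
    # integer range [0, 255] into floats in [0, 1].
    return x / 255.0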
def test_one_hot_encode(one_hot_encode):
test_shape = np.random.choice(range(1000))
test_numbers = np.random.choice(range(10), test_shape)
one_hot_out = one_hot_encode(test_numbers)
assert type(one_hot_out).__module__ == np.__name__,\
'Not Numpy Object'
assert one_hot_out.shape == (test_shape, 10),\
'Incorrect Shape. {} shape found'.format(one_hot_out.shape)
n_encode_tests = 5
test_pairs = list(zip(test_numbers, one_hot_out))
test_indices = np.random.choice(len(test_numbers), n_encode_tests)
labels = [test_pairs[test_i][0] for test_i in test_indices]
enc_labels = np.array([test_pairs[test_i][1] for test_i in test_indices])
new_enc_labels = one_hot_encode(labels)
assert np.array_equal(enc_labels, new_enc_labels),\
'Encodings returned different results for the same numbers.\n' \
'For the first call it returned:\n' \
'{}\n' \
'For the second call it returned\n' \
'{}\n' \
'Make sure you save the map of labels to encodings outside of the function.'.format(enc_labels, new_enc_labels)
for one_hot in new_enc_labels:
assert (one_hot==1).sum() == 1,\
'Each one-hot-encoded value should include the number 1 exactly once.\n' \
'Found {}\n'.format(one_hot)
assert (one_hot==0).sum() == len(one_hot)-1,\
'Each one-hot-encoded value should include zeros in all but one position.\n' \
'Found {}\n'.format(one_hot)
_print_success_message()
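# Illustrative sketch (added for exposition): a minimal one_hot_encode() that
# satisfies the checks above, assuming labels are always drawn from 0..9. It
# reuses the module-level numpy import.
def _reference_one_hot_encode(labels):
    encoded = np.zeros((len(labels), 10))
    # Set exactly one 1 per row at each label's column index.
    encoded[np.arange(len(labels)), labels] = 1
    return encoded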
def test_nn_image_inputs(neural_net_image_input):
image_shape = (32, 32, 3)
nn_inputs_out_x = neural_net_image_input(image_shape)
assert nn_inputs_out_x.get_shape().as_list() == [None, image_shape[0], image_shape[1], image_shape[2]],\
'Incorrect Image Shape. Found {} shape'.format(nn_inputs_out_x.get_shape().as_list())
assert nn_inputs_out_x.op.type == 'Placeholder',\
'Incorrect Image Type. Found {} type'.format(nn_inputs_out_x.op.type)
assert nn_inputs_out_x.name == 'x:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_x.name)
print('Image Input Tests Passed.')
def test_nn_label_inputs(neural_net_label_input):
n_classes = 10
nn_inputs_out_y = neural_net_label_input(n_classes)
assert nn_inputs_out_y.get_shape().as_list() == [None, n_classes],\
'Incorrect Label Shape. Found {} shape'.format(nn_inputs_out_y.get_shape().as_list())
assert nn_inputs_out_y.op.type == 'Placeholder',\
'Incorrect Label Type. Found {} type'.format(nn_inputs_out_y.op.type)
assert nn_inputs_out_y.name == 'y:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_y.name)
print('Label Input Tests Passed.')
def test_nn_keep_prob_inputs(neural_net_keep_prob_input):
nn_inputs_out_k = neural_net_keep_prob_input()
assert nn_inputs_out_k.get_shape().ndims is None,\
'Too many dimensions found for keep prob. Found {} dimensions. It should be a scalar (0-Dimension Tensor).'.format(nn_inputs_out_k.get_shape().ndims)
assert nn_inputs_out_k.op.type == 'Placeholder',\
'Incorrect keep prob Type. Found {} type'.format(nn_inputs_out_k.op.type)
assert nn_inputs_out_k.name == 'keep_prob:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_k.name)
print('Keep Prob Tests Passed.')
def test_con_pool(conv2d_maxpool):
test_x = tf.placeholder(tf.float32, [None, 32, 32, 5])
test_num_outputs = 10
test_con_k = (2, 2)
test_con_s = (4, 4)
test_pool_k = (2, 2)
test_pool_s = (2, 2)
conv2d_maxpool_out = conv2d_maxpool(test_x, test_num_outputs, test_con_k, test_con_s, test_pool_k, test_pool_s)
assert conv2d_maxpool_out.get_shape().as_list() == [None, 4, 4, 10],\
'Incorrect Shape. Found {} shape'.format(conv2d_maxpool_out.get_shape().as_list())
_print_success_message()
def test_flatten(flatten):
test_x = tf.placeholder(tf.float32, [None, 10, 30, 6])
flat_out = flatten(test_x)
assert flat_out.get_shape().as_list() == [None, 10*30*6],\
'Incorrect Shape. Found {} shape'.format(flat_out.get_shape().as_list())
_print_success_message()
def test_fully_conn(fully_conn):
test_x = tf.placeholder(tf.float32, [None, 128])
test_num_outputs = 40
fc_out = fully_conn(test_x, test_num_outputs)
assert fc_out.get_shape().as_list() == [None, 40],\
'Incorrect Shape. Found {} shape'.format(fc_out.get_shape().as_list())
_print_success_message()
def test_output(output):
test_x = tf.placeholder(tf.float32, [None, 128])
test_num_outputs = 40
output_out = output(test_x, test_num_outputs)
assert output_out.get_shape().as_list() == [None, 40],\
'Incorrect Shape. Found {} shape'.format(output_out.get_shape().as_list())
_print_success_message()
def test_conv_net(conv_net):
test_x = tf.placeholder(tf.float32, [None, 32, 32, 3])
test_k = tf.placeholder(tf.float32)
logits_out = conv_net(test_x, test_k)
assert logits_out.get_shape().as_list() == [None, 10],\
'Incorrect Model Output. Found {}'.format(logits_out.get_shape().as_list())
print('Neural Network Built!')
def test_train_nn(train_neural_network):
mock_session = tf.Session()
test_x = np.random.rand(128, 32, 32, 3)
test_y = np.random.rand(128, 10)
test_k = np.random.rand(1)
test_optimizer = tf.train.AdamOptimizer()
mock_session.run = MagicMock()
train_neural_network(mock_session, test_optimizer, test_k, test_x, test_y)
assert mock_session.run.called, 'Session not used'
_print_success_message()
| mit |
hoangminhitvn/flask | flask/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py | 210 | 9664 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)
cache = None # created when needed
class ResourceCache(Cache):
def __init__(self, base=None):
if base is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('resource-cache'))
super(ResourceCache, self).__init__(base)
def is_stale(self, resource, path):
"""
Is the cache stale for the given resource?
:param resource: The :class:`Resource` being cached.
:param path: The path of the resource in the cache.
:return: True if the cache is stale.
"""
# Cache invalidation is a hard problem :-)
return True
def get(self, resource):
"""
Get a resource into the cache.
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
return result
class ResourceBase(object):
def __init__(self, finder, name):
self.finder = finder
self.name = name
class Resource(ResourceBase):
"""
A class representing an in-package resource, such as a data file. This is
not normally instantiated by user code, but rather by a
:class:`ResourceFinder` which manages the resource.
"""
is_container = False # Backwards compatibility
def as_stream(self):
"""
Get the resource as a stream.
This is not a property to make it obvious that it returns a new stream
each time.
"""
return self.finder.get_stream(self)
@cached_property
def file_path(self):
global cache
if cache is None:
cache = ResourceCache()
return cache.get(self)
@cached_property
def bytes(self):
return self.finder.get_bytes(self)
@cached_property
def size(self):
return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
is_container = True # Backwards compatibility
@cached_property
def resources(self):
return self.finder.get_resources(self)
class ResourceFinder(object):
"""
Resource finder for file system resources.
"""
def __init__(self, module):
self.module = module
self.loader = getattr(module, '__loader__', None)
self.base = os.path.dirname(getattr(module, '__file__', ''))
def _adjust_path(self, path):
return os.path.realpath(path)
def _make_path(self, resource_name):
# Issue #50: need to preserve type of path on Python 2.x
# like os.path._get_sep
if isinstance(resource_name, bytes): # should only happen on 2.x
sep = b'/'
else:
sep = '/'
parts = resource_name.split(sep)
parts.insert(0, self.base)
result = os.path.join(*parts)
return self._adjust_path(result)
def _find(self, path):
return os.path.exists(path)
def get_cache_info(self, resource):
return None, resource.path
def find(self, resource_name):
path = self._make_path(resource_name)
if not self._find(path):
result = None
else:
if self._is_directory(path):
result = ResourceContainer(self, resource_name)
else:
result = Resource(self, resource_name)
result.path = path
return result
def get_stream(self, resource):
return open(resource.path, 'rb')
def get_bytes(self, resource):
with open(resource.path, 'rb') as f:
return f.read()
def get_size(self, resource):
return os.path.getsize(resource.path)
def get_resources(self, resource):
def allowed(f):
return f != '__pycache__' and not f.endswith(('.pyc', '.pyo'))
return set([f for f in os.listdir(resource.path) if allowed(f)])
def is_container(self, resource):
return self._is_directory(resource.path)
_is_directory = staticmethod(os.path.isdir)
class ZipResourceFinder(ResourceFinder):
"""
Resource finder for resources in .zip files.
"""
def __init__(self, module):
super(ZipResourceFinder, self).__init__(module)
archive = self.loader.archive
self.prefix_len = 1 + len(archive)
# PyPy doesn't have a _files attr on zipimporter, and you can't set one
if hasattr(self.loader, '_files'):
self._files = self.loader._files
else:
self._files = zipimport._zip_directory_cache[archive]
self.index = sorted(self._files)
def _adjust_path(self, path):
return path
def _find(self, path):
path = path[self.prefix_len:]
if path in self._files:
result = True
else:
if path and path[-1] != os.sep:
path = path + os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
if not result:
logger.debug('_find failed: %r %r', path, self.loader.prefix)
else:
logger.debug('_find worked: %r %r', path, self.loader.prefix)
return result
def get_cache_info(self, resource):
prefix = self.loader.archive
path = resource.path[1 + len(prefix):]
return prefix, path
def get_bytes(self, resource):
return self.loader.get_data(resource.path)
def get_stream(self, resource):
return io.BytesIO(self.get_bytes(resource))
def get_size(self, resource):
path = resource.path[self.prefix_len:]
return self._files[path][3]
def get_resources(self, resource):
path = resource.path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
plen = len(path)
result = set()
i = bisect.bisect(self.index, path)
while i < len(self.index):
if not self.index[i].startswith(path):
break
s = self.index[i][plen:]
result.add(s.split(os.sep, 1)[0]) # only immediate children
i += 1
return result
def _is_directory(self, path):
path = path[self.prefix_len:]
if path and path[-1] != os.sep:
path += os.sep
i = bisect.bisect(self.index, path)
try:
result = self.index[i].startswith(path)
except IndexError:
result = False
return result
_finder_registry = {
type(None): ResourceFinder,
zipimport.zipimporter: ZipResourceFinder
}
try:
import _frozen_importlib
_finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
_finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
pass
def register_finder(loader, finder_maker):
_finder_registry[type(loader)] = finder_maker
_finder_cache = {}
def finder(package):
"""
Return a resource finder for a package.
:param package: The name of the package.
:return: A :class:`ResourceFinder` instance for the package.
"""
if package in _finder_cache:
result = _finder_cache[package]
else:
if package not in sys.modules:
__import__(package)
module = sys.modules[package]
path = getattr(module, '__path__', None)
if path is None:
raise DistlibException('You cannot get a finder for a module, '
'only for a package')
loader = getattr(module, '__loader__', None)
finder_maker = _finder_registry.get(type(loader))
if finder_maker is None:
raise DistlibException('Unable to locate finder for %r' % package)
result = finder_maker(module)
_finder_cache[package] = result
return result
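# Illustrative usage sketch (added for exposition): reading a packaged data
# file through a finder. The package and resource names are hypothetical.
def _finder_usage_example():
    f = finder('mypackage')              # hypothetical package name
    r = f.find('data/defaults.cfg')      # hypothetical resource path
    if r is not None and not r.is_container:
        return r.bytes                   # or r.file_path for a cached copy
    return None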
_dummy_module = types.ModuleType(str('__dummy__'))
def finder_for_path(path):
"""
Return a resource finder for a path, which should represent a container.
:param path: The path.
:return: A :class:`ResourceFinder` instance for the path.
"""
result = None
# calls any path hooks, gets importer into cache
pkgutil.get_importer(path)
loader = sys.path_importer_cache.get(path)
finder = _finder_registry.get(type(loader))
if finder:
module = _dummy_module
module.__file__ = os.path.join(path, '')
module.__loader__ = loader
result = finder(module)
return result
| bsd-3-clause |
eeshangarg/zulip | zerver/webhooks/freshping/tests.py | 1 | 1324 | from zerver.lib.test_classes import WebhookTestCase
class FreshpingHookTests(WebhookTestCase):
STREAM_NAME = "freshping"
URL_TEMPLATE = "/api/v1/external/freshping?api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = "freshping"
def test_freshping_check_test(self) -> None:
"""
Tests if freshping check test is handled correctly
"""
expected_topic = "Freshping"
expected_message = "Freshping webhook has been successfully configured."
self.check_webhook("freshping_check_test", expected_topic, expected_message)
def test_freshping_check_unreachable(self) -> None:
"""
Tests if freshping check unreachable is handled correctly
"""
expected_topic = "Test Check"
expected_message = """
https://example.com has just become unreachable.
Error code: 521.
""".strip()
self.check_webhook("freshping_check_unreachable", expected_topic, expected_message)
def test_freshping_check_reachable(self) -> None:
"""
Tests if freshping check reachable is handled correctly
"""
expected_topic = "Test Check"
expected_message = "https://example.com is back up and no longer unreachable."
self.check_webhook("freshping_check_reachable", expected_topic, expected_message)
| apache-2.0 |
davisein/jitsudone | django/django/contrib/comments/__init__.py | 423 | 3333 | from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.utils.importlib import import_module
DEFAULT_COMMENTS_APP = 'django.contrib.comments'
def get_comment_app():
"""
Get the comment app (i.e. "django.contrib.comments") as defined in the settings
"""
# Make sure the app's in INSTALLED_APPS
comments_app = get_comment_app_name()
if comments_app not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The COMMENTS_APP (%r) "\
"must be in INSTALLED_APPS" % settings.COMMENTS_APP)
# Try to import the package
try:
package = import_module(comments_app)
except ImportError:
raise ImproperlyConfigured("The COMMENTS_APP setting refers to "\
"a non-existing package.")
return package
def get_comment_app_name():
"""
Returns the name of the comment app (either the setting value, if it
exists, or the default).
"""
return getattr(settings, 'COMMENTS_APP', DEFAULT_COMMENTS_APP)
def get_model():
"""
Returns the comment model class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_model"):
return get_comment_app().get_model()
else:
return Comment
def get_form():
"""
Returns the comment ModelForm class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form"):
return get_comment_app().get_form()
else:
return CommentForm
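# Illustrative sketch (added for exposition): a custom comments app named in
# COMMENTS_APP only needs to expose the hooks queried in this module, e.g.
# (module and class names below are hypothetical):
#
#   # myproject/mycomments/__init__.py
#   from myproject.mycomments.models import CommentWithTitle
#   from myproject.mycomments.forms import CommentFormWithTitle
#
#   def get_model():
#       return CommentWithTitle
#
#   def get_form():
#       return CommentFormWithTitle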
def get_form_target():
"""
Returns the target URL for the comment form submission view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
return get_comment_app().get_form_target()
else:
return urlresolvers.reverse("django.contrib.comments.views.comments.post_comment")
def get_flag_url(comment):
"""
Get the URL for the "flag this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_flag_url"):
return get_comment_app().get_flag_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.flag",
args=(comment.id,))
def get_delete_url(comment):
"""
Get the URL for the "delete this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_delete_url"):
return get_comment_app().get_delete_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.delete",
args=(comment.id,))
def get_approve_url(comment):
"""
Get the URL for the "approve this comment from moderation" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_approve_url"):
return get_comment_app().get_approve_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.approve",
args=(comment.id,))
| bsd-3-clause |
rechner/Taxidi | notify/googlevoice/voice.py | 1 | 11693 | from conf import config
from util import *
import settings
import base64
import os
import re
qpat = re.compile(r'\?')
if settings.DEBUG:
import logging
logging.basicConfig()
log = logging.getLogger('PyGoogleVoice')
log.setLevel(logging.DEBUG)
else:
log = None
class Voice(object):
"""
Main voice instance for interacting with the Google Voice service
Handles login/logout and most of the baser HTTP methods
"""
def __init__(self):
install_opener(build_opener(HTTPCookieProcessor(CookieJar())))
for name in settings.FEEDS:
setattr(self, name, self.__get_xml_page(name))
setattr(self, 'message', self.__get_xml_page('message'))
######################
# Some handy methods
######################
def special(self):
"""
Returns special identifier for your session (if logged in)
"""
if hasattr(self, '_special') and getattr(self, '_special'):
return self._special
try:
try:
regex = bytes("('_rnr_se':) '(.+)'", 'utf8')
except TypeError:
regex = bytes("('_rnr_se':) '(.+)'")
except NameError:
regex = r"('_rnr_se':) '(.+)'"
try:
sp = re.search(regex, urlopen(settings.INBOX).read()).group(2)
except AttributeError:
sp = None
self._special = sp
return sp
special = property(special)
def login(self, email=None, passwd=None, smsKey=None):
"""
Login to the service using your Google Voice account
Credentials will be prompted for if not given as args or in the ``~/.gvoice`` config file
"""
if hasattr(self, '_special') and getattr(self, '_special'):
return self
if email is None:
email = config.email
if email is None:
email = input('Email address: ')
if passwd is None:
passwd = config.password
if passwd is None:
from getpass import getpass
passwd = getpass()
from os import path
content = self.__do_page('login').read()
# holy hackjob
galx = re.search(r"name=\"GALX\"\s+value=\"([^\"]+)\"", content).group(1)
result = self.__do_page('login', {'Email': email, 'Passwd': passwd, 'GALX': galx})
if result.geturl() == getattr(settings, "SMSAUTH"):
content = self.__smsAuth(smsKey)
try:
smsToken = re.search(r"name=\"smsToken\"\s+value=\"([^\"]+)\"", content).group(1)
galx = re.search(r"name=\"GALX\"\s+value=\"([^\"]+)\"", content).group(1)
content = self.__do_page('login', {'smsToken': smsToken, 'service': "grandcentral", 'GALX': galx})
except AttributeError:
raise LoginError
del smsKey, smsToken, galx
del email, passwd
try:
assert self.special
except (AssertionError, AttributeError):
raise LoginError
return self
def __smsAuth(self, smsKey=None):
if smsKey is None:
smsKey = config.smsKey
if smsKey is None:
from getpass import getpass
smsPin = getpass("SMS PIN: ")
content = self.__do_page('smsauth', {'smsUserPin': smsPin}).read()
else:
smsKey = base64.b32decode(re.sub(r' ', '', smsKey), casefold=True).encode("hex")
content = self.__oathtoolAuth(smsKey)
try_count = 1
while "The code you entered didn't verify." in content and try_count < 5:
sleep_seconds = 10
try_count += 1
print 'invalid code, retrying after {0} seconds (attempt {1})'.format(sleep_seconds, try_count)
import time
time.sleep(sleep_seconds)
content = self.__oathtoolAuth(smsKey)
del smsKey
return content
def __oathtoolAuth(self, smsKey):
import commands
smsPin = commands.getstatusoutput('oathtool --totp '+smsKey)[1]
content = self.__do_page('smsauth', {'smsUserPin': smsPin}).read()
del smsPin
return content
def logout(self):
"""
Logs out an instance and makes sure it does not still have a session
"""
self.__do_page('logout')
del self._special
assert self.special == None
return self
def call(self, outgoingNumber, forwardingNumber=None, phoneType=None, subscriberNumber=None):
"""
Make a call to an ``outgoingNumber`` from your ``forwardingNumber`` (optional).
If you pass in your ``forwardingNumber``, please also pass in the correct ``phoneType``
"""
if forwardingNumber is None:
forwardingNumber = config.forwardingNumber
if phoneType is None:
phoneType = config.phoneType
self.__validate_special_page('call', {
'outgoingNumber': outgoingNumber,
'forwardingNumber': forwardingNumber,
'subscriberNumber': subscriberNumber or 'undefined',
'phoneType': phoneType,
'remember': '1'
})
__call__ = call
def cancel(self, outgoingNumber=None, forwardingNumber=None):
"""
Cancels a call matching outgoing and forwarding numbers (if given).
Will raise an error if no matching call is being placed
"""
self.__validate_special_page('cancel', {
'outgoingNumber': outgoingNumber or 'undefined',
'forwardingNumber': forwardingNumber or 'undefined',
'cancelType': 'C2C',
})
def phones(self):
"""
Returns a list of ``Phone`` instances attached to your account.
"""
return [Phone(self, data) for data in self.contacts['phones'].values()]
phones = property(phones)
def settings(self):
"""
Dict of current Google Voice settings
"""
return AttrDict(self.contacts['settings'])
settings = property(settings)
def send_sms(self, phoneNumber, text):
"""
Send an SMS message to a given ``phoneNumber`` with the given ``text`` message
"""
self.__validate_special_page('sms', {'phoneNumber': phoneNumber, 'text': text})
def search(self, query):
"""
Search your Google Voice Account history for calls, voicemails, and sms
Returns ``Folder`` instance containing matching messages
"""
return self.__get_xml_page('search', data='?q=%s' % quote(query))()
def archive(self, msg, archive=1):
"""
Archive the specified message by removing it from the Inbox.
"""
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
self.__messages_post('archive', msg, archive=archive)
def delete(self, msg, trash=1):
"""
Moves this message to the Trash. Use ``message.delete(0)`` to move it out of the Trash.
"""
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
self.__messages_post('delete', msg, trash=trash)
def download(self, msg, adir=None, filename=None):
"""
Download a voicemail or recorded call MP3 matching the given ``msg``
which can either be a ``Message`` instance, or a SHA1 identifier.
Saves files to ``adir`` (defaults to current directory).
Message hashes can be found in ``self.voicemail().messages`` for example.
Returns location of saved file.
"""
from os import path,getcwd
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
if adir is None:
adir = getcwd()
if filename is None:
filename = "%s.mp3" % msg
try:
response = self.__do_page('download', msg)
except Exception:
raise DownloadError
fn = path.join(adir, filename)
fo = open(fn, 'wb')
fo.write(response.read())
fo.close()
return fn
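# Hedged example for download(): per the docstring above, message hashes can
# be taken from ``self.voicemail().messages``. Assumes ``voice`` is a
# logged-in instance of this class:
#
#   for message in voice.voicemail().messages:
#       saved = voice.download(message, adir='/tmp')
#       print 'saved voicemail to %s' % saved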
def contacts(self):
"""
Partial data of your Google Account Contacts related to your Voice account.
For a more comprehensive suite of APIs, check out http://code.google.com/apis/contacts/docs/1.0/developers_guide_python.html
"""
if hasattr(self, '_contacts'):
return self._contacts
self._contacts = self.__get_xml_page('contacts')()
return self._contacts
contacts = property(contacts)
######################
# Helper methods
######################
def __do_page(self, page, data=None, headers={}, terms={}):
"""
Loads a page out of the settings and passes it on to a urllib Request
"""
page = page.upper()
if isinstance(data, (dict, tuple)):
data = urlencode(data)
headers.update({'User-Agent': 'PyGoogleVoice/0.5'})
if log:
log.debug('%s?%s - %s' % (getattr(settings, page)[22:], data or '', headers))
if page in ('DOWNLOAD','XML_SEARCH'):
return urlopen(Request(getattr(settings, page) + data, None, headers))
if data:
headers.update({'Content-type': 'application/x-www-form-urlencoded;charset=utf-8'})
pageuri = getattr(settings, page)
if len(terms) > 0:
m = qpat.match(pageuri)
if m:
pageuri += '&'
else:
pageuri += '?'
for i, k in enumerate(terms.keys()):
pageuri += k + '=' + terms[k]
if i < len(terms) - 1:
pageuri += '&'
return urlopen(Request(pageuri, data, headers))
def __validate_special_page(self, page, data={}, **kwargs):
"""
Validates a given special page for an 'ok' response
"""
data.update(kwargs)
load_and_validate(self.__do_special_page(page, data))
_Phone__validate_special_page = __validate_special_page
def __do_special_page(self, page, data=None, headers={}, terms={}):
"""
Add self.special to request data
"""
assert self.special, 'You must login before using this page'
if isinstance(data, tuple):
data += (('_rnr_se', self.special),)
elif isinstance(data, dict):
data.update({'_rnr_se': self.special})
return self.__do_page(page, data, headers, terms)
_Phone__do_special_page = __do_special_page
def __get_xml_page(self, page, data=None, headers={}):
"""
Return XMLParser instance generated from given page
"""
return XMLParser(self, page, lambda terms={}: self.__do_special_page('XML_%s' % page.upper(), data, headers, terms).read())
def __messages_post(self, page, *msgs, **kwargs):
"""
Performs message operations, e.g. deleting, starring, moving
"""
data = kwargs.items()
for msg in msgs:
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
data += (('messages',msg),)
return self.__do_special_page(page, dict(data))
_Message__messages_post = __messages_post
| gpl-3.0 |
treejames/viewfinder | backend/www/test/facebook_utils.py | 13 | 7256 | #!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Facebook-specific test functions.
Creating facebook test users should only need to be done once--they
persist across unittest runs and are shared by all developers. Create
the universe of test users with:
% python -m viewfinder.backend.www.test.facebook_utils --create --num_users=<num>
Query users with:
% python -m viewfinder.backend.www.test.facebook_utils --query
Delete all existing test users with:
% python -m viewfinder.backend.www.test.facebook_utils --delete
- FacebookUtils: encapsulates Facebook utilities
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import json
import logging
import os
import random
import urllib
import urlparse
from tornado import httpclient, options
from viewfinder.backend.base import base_options, secrets, util
options.define('create', default=False,
help='Create test users, each with a random selection of friends')
options.define('query', default=False, help='Queries the list of facebook test users')
options.define('delete', default=False, help='Deletes all facebook test users')
options.define('num_users', default=100, help='Number of users for creation')
_FACEBOOK_APP_ACCESS_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
_FACEBOOK_QUERY_TEST_USERS_URL = 'https://graph.facebook.com/%s/accounts/test-users'
_FACEBOOK_CREATE_TEST_USER_URL = 'https://graph.facebook.com/%s/accounts/test-users'
_FACEBOOK_DELETE_TEST_USER_URL = 'https://graph.facebook.com/%s'
_FACEBOOK_FRIEND_TEST_USER_URL = 'https://graph.facebook.com/%s/friends/%s'
_FACEBOOK_PERMISSIONS = 'offline_access,user_photos,friends_photos'
class FacebookUtils(object):
"""Provides utilities for interfacing with Facebook test user accounts.
"""
def __init__(self):
url = _FACEBOOK_APP_ACCESS_TOKEN_URL + '?' + \
urllib.urlencode({'client_id': secrets.GetSecret('facebook_api_key'),
'client_secret': secrets.GetSecret('facebook_secret'),
'grant_type': 'client_credentials'})
http_client = httpclient.HTTPClient()
response = http_client.fetch(url, request_timeout=100)
try:
self._access_token = urlparse.parse_qs(response.body)['access_token'][0]
except:
logging.error('unable to parse access token from response body: %s' % response.body)
raise
def CreateTestUser(self, name):
print 'creating user %s' % name
url = (_FACEBOOK_CREATE_TEST_USER_URL % secrets.GetSecret('facebook_api_key')) + '?' + \
urllib.urlencode({'installed': 'true',
'name': name,
'permissions': _FACEBOOK_PERMISSIONS,
'method': 'post',
'access_token': self._access_token})
http_client = httpclient.HTTPClient()
response = http_client.fetch(url, request_timeout=100)
try:
return json.loads(response.body)
except:
logging.error('unable to parse user data from response body: %s' % response.body)
raise
def DeleteTestUser(self, u):
assert 'access_token' in u and 'id' in u, u
print 'deleting user %s' % u['id']
url = (_FACEBOOK_DELETE_TEST_USER_URL % u['id']) + '?' + \
urllib.urlencode({'method': 'delete',
'access_token': u['access_token']})
http_client = httpclient.HTTPClient()
response = http_client.fetch(url, request_timeout=100)
assert response.body == 'true', 'deleting user: %r' % u
def QueryFacebookTestUsers(self, limit):
url = (_FACEBOOK_QUERY_TEST_USERS_URL % secrets.GetSecret('facebook_api_key')) + '?' + \
urllib.urlencode({'access_token': self._access_token, 'limit': limit})
http_client = httpclient.HTTPClient()
response = http_client.fetch(url, request_timeout=100)
try:
json_data = json.loads(response.body)
return json_data['data']
except:
logging.error('unable to query facebook test users: %s' % response.body)
raise
def CreateFacebookFriend(self, id1, at1, id2, at2, friendships):
if (id1, id2) in friendships:
print 'friendship between %s and %s already exists' % (id1, id2)
return
print 'creating friendship between user %s and %s' % (id1, id2)
try:
http_client = httpclient.HTTPClient()
url = (_FACEBOOK_FRIEND_TEST_USER_URL % (id1, id2)) + '?' + \
urllib.urlencode({'method': 'post', 'access_token': at1})
response = http_client.fetch(url, request_timeout=100)
assert response.body == 'true', 'friendship from user %s to %s' % (id1, id2)
url = (_FACEBOOK_FRIEND_TEST_USER_URL % (id2, id1)) + '?' + \
urllib.urlencode({'method': 'post', 'access_token': at2})
response = http_client.fetch(url, request_timeout=100)
assert response.body == 'true', 'friendship from user %s to %s' % (id2, id1)
friendships[(id1, id2)] = True
friendships[(id2, id1)] = True
except Exception:
logging.error('unable to create connection')
def CreateFacebookTestUsers(self):
users = self.QueryFacebookTestUsers(limit=options.options.num_users)
with open(os.path.join(os.path.dirname(__file__), 'test_names'), 'r') as f:
names = f.readlines()
names = [name.strip() for name in names]
random.shuffle(names)
assert len(names) >= options.options.num_users
logging.info('creating %d Facebook test users (%d more)' % \
(options.options.num_users, options.options.num_users - len(users)))
for i in range(len(users), options.options.num_users):
users.append(self.CreateTestUser(names[i]))
logging.info('creating user connections...')
friendships = dict()
for cur_u in users:
max_friends = min(len(users) - 1, 20)
num_friends = random.randint(1, max_friends)
friends = set([(u['id'], u['access_token']) for i in xrange(num_friends) \
for u in [random.choice(users)] if u != cur_u])
logging.info('creating %d connections for user %s: %r' % (num_friends, cur_u['id'], friends))
for friend in friends:
self.CreateFacebookFriend(cur_u['id'], cur_u['access_token'], friend[0], friend[1], friendships)
def DeleteFacebookTestUsers(self):
logging.info('Deleting facebook users')
users = self.QueryFacebookTestUsers(limit=options.options.num_users)
[self.DeleteTestUser(u) for u in users]
def main():
options.parse_command_line()
options.options.domain = 'goviewfinder.com'
secrets.InitSecretsForTest()
fu = FacebookUtils()
# All of this synchronous stuff is slow, but it only needs to run once.
if options.options.delete:
fu.DeleteFacebookTestUsers()
if options.options.query:
users = fu.QueryFacebookTestUsers(limit=options.options.num_users)
for u in users:
print u.get('id', 'no id'), u.get('name', 'no name')
if options.options.create:
fu.CreateFacebookTestUsers()
if __name__ == '__main__':
main()
| apache-2.0 |
chokribr/invenio | invenio/legacy/websubmit/icon_creator.py | 13 | 36867 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is websubmit_icon_creator.py
This tool is used to create an icon of a picture file.
+ Python API:
Please see create_icon().
+ CLI API:
# $ python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
# --icon-scale=200 \\
# --icon-name=test-icon \\
# --icon-file-format=jpg \\
# test-image.jpg
# $ python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
# --icon-scale=200 \\
# --icon-name=test-icon2 \\
# --icon-file-format=gif \\
# --multipage-icon \\
# --multipage-icon-delay=50 \\
# test-image2.pdf
"""
__revision__ = "$Id$"
import os.path, sys, getopt, shutil, tempfile, re
from invenio.config import \
CFG_TMPDIR, \
CFG_PATH_PS2PDF, \
CFG_PATH_PDFTK, \
CFG_PATH_CONVERT
from invenio.utils.shell import escape_shell_arg
from invenio.legacy.websubmit.config import InvenioWebSubmitIconCreatorError
CFG_ALLOWED_FILE_EXTENSIONS = ["pdf", "gif", "jpg", \
"jpeg", "ps", "png", "bmp", \
"eps", "epsi", "epsf", \
"tiff", "tif"]
# ***** Functions related to the icon creation process: *****
# Accepted format for the ImageMagick 'scale' parameter:
re_imagemagic_scale_parameter_format = re.compile(r'x?\d+(x\d*)?(\^|!|>|<|@|%)?$')
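# A few sample values the pattern above accepts (ImageMagick geometry style);
# these are illustrative, not an exhaustive list:
#   "180"     - scale the width to 180 pixels
#   "320x240" - fit the image within 320x240
#   "100>"    - only shrink images that are larger than 100 pixels
#   "50%"     - scale to 50 percent of the original size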
def create_working_directory():
"""Create a "working directory" in which the files related to the icon-
creation process can be stored, and return the full path to it.
The working directory will be created in ~invenio/var/tmp.
If it cannot be created there, an exception
(InvenioWebSubmitIconCreatorError) will be raised.
The working directory will have the prefix
"websubmit_icon_creator_", and could be given a name something like:
- websubmit_icon_creator_Tzs3St
@return: (string) - the full path to the working directory.
@Exceptions raised: InvenioWebSubmitIconCreatorError.
"""
## Create the temporary directory in which to place the files related to
## icon creation in ~invenio/var/tmp:
path_workingdir = None
try:
path_workingdir = tempfile.mkdtemp(prefix="websubmit_icon_creator_", \
dir="%s" % CFG_TMPDIR)
except OSError as err:
## Unable to create the temporary directory in ~invenio/var/tmp
msg = "Error: Unable to create a temporary working directory in " \
"which to carry out the icon creation process. An attempt was " \
"made to create the directory in [%s]; the error encountered " \
"was <%s>. Icon creation has failed." % (CFG_TMPDIR, str(err))
raise InvenioWebSubmitIconCreatorError(msg)
## return the path to the working-directory:
return path_workingdir
def copy_file_to_directory(source_file, destination_dir):
"""Attempt to copy an ordinary file from one location to a destination
directory, returning the name of the copied file if successful.
@param source_file: (string) - the name of the file to be copied
to the destination directory.
@param destination_dir: (string) - the path of the directory into
which the source file is to be copied.
@return: (string) - the name of the source file after it has been
copied to the destination directory (i.e. no leading path information.)
@Exceptions raised: (IOError) - upon failure to successfully copy the
source file to the destination directory.
"""
## Divide the input filename into path and basename:
(dummy, name_source_file) = os.path.split(source_file)
if name_source_file == "":
## The source file is just a path - not a valid filename.
msg = """Error: the name of the file to be copied was invalid."""
raise IOError(msg)
## Test to see whether source file is a real file and is readable:
if os.access("%s" % source_file, os.R_OK):
## File is readable. Copy it locally to the destination directory:
try:
shutil.copyfile("%s" % source_file, \
"%s/%s" % (destination_dir, name_source_file))
except IOError:
## Unable to copy the source file to the destination directory.
msg = """Error: Unable to copy source file [%s] to """ \
"""the destination directory [%s].""" \
% (source_file, destination_dir)
raise IOError(msg)
else:
## Unable to read the source file.
msg = """Error: Unable to copy source file [%s] to """ \
"""destination directory [%s]. (File not readable.)""" \
% (source_file, destination_dir)
raise IOError(msg)
## Now that the source file has been successfully copied to the destination
## directory, return its base name:
return name_source_file
def build_icon(path_workingdir,
source_filename,
source_filetype,
icon_name,
icon_filetype,
multipage_icon,
multipage_icon_delay,
icon_scale):
"""Whereas create_icon acts as the API for icon creation and therefore
deals with argument washing, temporary working directory creation,
etc, the build_icon function takes care of the actual creation of the
icon file itself by calling various shell tools.
To accomplish this, it relies upon the following parameters:
@param path_workingdir: (string) - the path to the working directory
in which all files related to the icon creation are stored.
@param source_filename: (string) - the filename of the original image
file.
@param source_filetype: (string) - the file type of the original image
file.
@param icon_name: (string) - the name that is to be given to the icon.
@param icon_filetype: (string) - the file type of the icon that is
to be created.
@param multipage_icon: (boolean) - a flag indicating whether or not
an icon with multiple pages (i.e. an animated gif icon) should be
created.
@param multipage_icon_delay: (integer) - the delay to be used between
frame changing for an icon with multiple pages (i.e. an animated gif.)
@param icon_scale: (integer) - the scaling information for the created
icon.
@return: (string) - the name of the created icon file (which will have
been created in the working directory "path_workingdir".)
@Exceptions raised: (InvenioWebSubmitIconCreatorError) - raised when
the icon creation process fails.
"""
##
## If the source file is a PS, convert it into a PDF:
if source_filetype == "ps":
## Convert the subject file from PostScript to PDF:
if source_filename[-3:].lower() == ".ps":
## The name of the file to be stamped has a PostScript extension.
## Strip it and give the name of the PDF file to be created a
## PDF extension:
created_pdfname = "%s.pdf" % source_filename[:-3]
elif len(source_filename.split(".")) > 1:
## The file name has an extension - strip it and add a PDF
## extension:
raw_name = source_filename[:source_filename.rfind(".")]
if raw_name != "":
created_pdfname = "%s.pdf" % raw_name
else:
## It would appear that the file had no extension and that its
## name started with a period. Just use the original name with
## a .pdf suffix:
created_pdfname = "%s.pdf" % source_filename
else:
## No extension - use the original name with a .pdf suffix:
created_pdfname = "%s.pdf" % source_filename
## Build the distilling command:
cmd_distill = """%(distiller)s %(ps-file-path)s """ \
"""%(pdf-file-path)s 2>/dev/null""" % \
{ 'distiller' : CFG_PATH_PS2PDF,
'ps-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
source_filename)),
'pdf-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
created_pdfname)),
}
## Distill the PS into a PDF:
errcode_distill = os.system(cmd_distill)
## Test to see whether the PS was distilled into a PDF without error:
if errcode_distill or \
not os.access("%s/%s" % (path_workingdir, created_pdfname), os.F_OK):
## The PDF file was not correctly created in the working directory.
## Unable to continue.
msg = "Error: Unable to correctly convert PostScript file [%s] to" \
" PDF. Cannot create icon." % source_filename
raise InvenioWebSubmitIconCreatorError(msg)
## Now assign the name of the created PDF file to subject_file:
source_filename = created_pdfname
##
## Treat the name of the icon:
if icon_name in (None, ""):
## Since no name has been provided for the icon, give it the same name
## as the source file, but with the prefix "icon-":
icon_name = "icon-%s" % source_filename
## Now if the icon name has an extension, strip it and add that of the
## icon file type:
if len(icon_name.split(".")) > 1:
## The icon file name has an extension - strip it and add the icon
## file type extension:
raw_name = icon_name[:icon_name.rfind(".")]
if raw_name != "":
icon_name = "%s.%s" % (raw_name, icon_filetype)
else:
## It would appear that the file had no extension and that its
## name started with a period. Just use the original name with
## the icon file type's suffix:
icon_name = "%s.%s" % (icon_name, icon_filetype)
else:
## The icon name had no extension. Use the original name with the
## icon file type's suffix:
icon_name = "%s.%s" % (icon_name, icon_filetype)
##
## If the source file type is PS or PDF, it may be necessary to separate
## the first page from the rest of the document and keep it for use as
## the icon. Do this if necessary:
if source_filetype in ("ps", "pdf") and \
(icon_filetype != "gif" or not multipage_icon):
## Either (a) the icon type isn't GIF (in which case it cannot
## be animated and must therefore be created _only_ from the
## document's first page; or (b) the icon type is GIF, but the
## icon is to be created from the first page of the document only.
## The first page of the PDF document must be separated and is to
## be used for icon creation:
source_file_first_page = "p1-%s" % source_filename
## Perform the separation:
cmd_get_first_page = \
"%(pdftk)s A=%(source-file-path)s " \
"cat A1 output %(first-page-path)s " \
"2>/dev/null" \
% { 'pdftk' : CFG_PATH_PDFTK,
'source-file-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, source_filename)),
'first-page-path' : escape_shell_arg("%s/%s" % \
(path_workingdir, \
source_file_first_page)),
}
errcode_get_first_page = os.system(cmd_get_first_page)
## Check that the separation was successful:
if errcode_get_first_page or \
not os.access("%s/%s" % (path_workingdir, \
source_file_first_page), os.F_OK):
## Separation was unsuccessful.
msg = "Error: Unable to create an icon for file [%s/%s] - it " \
"wasn't possible to separate the first page from the " \
"rest of the document (error code [%s].)" \
% (path_workingdir, source_filename, errcode_get_first_page)
raise InvenioWebSubmitIconCreatorError(msg)
else:
## Successfully extracted the first page. Treat it as the source
## file for icon creation from now on:
source_filename = source_file_first_page
##
## Create the icon:
## If a delay is necessary for an animated gif icon, create the
## delay string:
delay_info = ""
if source_filetype in ("ps", "pdf") and \
icon_filetype == "gif" and multipage_icon:
## Include delay information:
delay_info = "-delay %s" % escape_shell_arg(str(multipage_icon_delay))
## Command for icon creation:
cmd_create_icon = "%(convert)s -colorspace rgb -auto-orient -scale %(scale)s %(delay)s " \
"%(source-file-path)s %(icon-file-path)s 2>/dev/null" \
% { 'convert' : CFG_PATH_CONVERT,
'scale' : \
escape_shell_arg(icon_scale),
'delay' : delay_info,
'source-file-path' : \
escape_shell_arg("%s/%s" \
% (path_workingdir, \
source_filename)),
'icon-file-path' : \
escape_shell_arg("%s/%s" \
% (path_workingdir, \
icon_name)),
}
errcode_create_icon = os.system(cmd_create_icon)
## Check that the icon creation was successful:
if errcode_create_icon or \
not os.access("%s/%s" % (path_workingdir, icon_name), os.F_OK):
## Icon creation was unsuccessful.
msg = "Error: Unable to create an icon for file [%s/%s] (error " \
"code [%s].)" \
% (path_workingdir, source_filename, errcode_create_icon)
raise InvenioWebSubmitIconCreatorError(msg)
##
## The icon was successfully created. Return its name:
return icon_name
def create_icon(options):
"""The driver for the icon creation process. This is effectively the
function that is responsible for coordinating the icon creation.
It is the API for the creation of an icon.
@param options: (dictionary) - a dictionary of options that are required
by the function in order to carry out the icon-creation process.
The dictionary must have the following structure:
+ input-file: (string) - the path to the input file (i.e. that
which is to be stamped;
+ icon-name: (string) - the name of the icon that is to be created
by the program. This is optional - if not provided,
a default name will be applied to the icon file instead;
+ multipage-icon: (boolean) - used only when the original file
is a PDF or PS file. If False, the created icon will feature ONLY
the first page of the PDF. If True, ALL pages of the PDF will
be included in the created icon. Note: If the icon type is not
gif, this flag will be forced as False.
+ multipage-icon-delay: (integer) - used only when the original
file is a PDF or PS AND use-first-page-only is False AND
the icon type is gif.
This allows the user to specify the delay between "pages"
of a multi-page (animated) icon.
+ icon-scale: ('geometry') - the scaling information to be used for the
creation of the new icon. Type 'geometry' as defined in ImageMagick.
(eg. 320 or 320x240 or 100> or 5%)
+ icon-file-format: (string) - the file format of the icon that is
to be created. Legal values are:
* pdf
* gif
* jpg
* jpeg
* ps
* png
* bmp
+ verbosity: (integer) - the verbosity level under which the program
is to run;
So, an example of the returned dictionary could be something like:
{ 'input-file' : "demo-picture-file.jpg",
'icon-name' : "icon-demo-picture-file",
'icon-file-format' : "gif",
'multipage-icon' : True,
'multipage-icon-delay' : 100,
'icon-scale' : 180,
'verbosity' : 0,
}
@return: (tuple) - consisting of two strings:
1. the path to the working directory in which all files related to
icon creation are stored;
2. The name of the "icon" file;
@Exceptions raised: (InvenioWebSubmitIconCreatorError)
be raised or propagated by this function when the icon creation process
fails for one reason or another.
"""
## SANITY CHECKS:
## Does the options dictionary contain all expected keys?
##
## A list of the names of the expected options:
expected_option_names = ['input-file', \
'icon-name', \
'icon-file-format', \
'multipage-icon', \
'multipage-icon-delay', \
'icon-scale', \
'verbosity']
expected_option_names.sort()
## A list of the option names that have been received:
received_option_names = options.keys()
received_option_names.sort()
if expected_option_names != received_option_names:
## Error: the dictionary of options had an illegal structure:
msg = """Error: Unexpected value received for "options" parameter."""
raise InvenioWebSubmitIconCreatorError(msg)
## Do we have an input file to work on?
if options["input-file"] in (None, ""):
## No input file - stop the icon creation:
msg = "Error: unable to determine the name of the file from which " \
"the icon is to be created."
raise InvenioWebSubmitIconCreatorError(msg)
else:
## Get the file type of the input file:
tmp_file_extension = options["input-file"].split(".")[-1]
## allow also Invenio files that use the format: filename.ext;format;subformat;version
tmp_file_extension = tmp_file_extension.split(';')[0]
if tmp_file_extension.lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
## Illegal input file type.
msg = "Error: icons can only be created from %s files, " \
"not [%s]." % (str(CFG_ALLOWED_FILE_EXTENSIONS), \
tmp_file_extension.lower())
raise InvenioWebSubmitIconCreatorError(msg)
else:
subject_filetype = tmp_file_extension.lower()
## Wash the requested icon name:
if type(options["icon-name"]) is not str:
options["icon-name"] = ""
else:
(dummy, name_iconfile) = os.path.split(options["icon-name"])
if name_iconfile != "":
## Take just the basename component of the icon file:
options["icon-name"] = name_iconfile
## Do we have an icon file format?
icon_format = options["icon-file-format"]
if icon_format in (None, ""):
## gif by default:
options["icon-file-format"] = "gif"
elif str(icon_format).lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
## gif if an invalid icon type was supplied:
options["icon-file-format"] = "gif"
else:
## Use the provided icon type:
options["icon-file-format"] = icon_format.lower()
## Wash the use-first-page-only flag according to the type of the
## requested icon:
if options["icon-file-format"] != "gif":
## Since the request icon isn't a gif file, it can't be animated
## and should be created from the first "page" of the original file:
options["multipage-icon"] = False
else:
## The requested icon is a gif. Verify that the multipage-icon
## flag is a boolean value. If not, set it to False by default:
if type(options["multipage-icon"]) is not bool:
## Non-boolean value: default to False:
options["multipage-icon"] = False
## Wash the delay time for frames in an animated gif icon:
if type(options["multipage-icon-delay"]) is not int:
## Invalid value - set it to default:
options["multipage-icon-delay"] = 100
elif options["multipage-icon-delay"] < 0:
## Can't have negative delays:
options["multipage-icon-delay"] = 100
## Wash the icon scaling information:
if not re_imagemagic_scale_parameter_format.match(options["icon-scale"]):
## Invalid value - set it to default:
options["icon-scale"] = "180"
## OK. Begin the icon creation process:
##
## Create a working directory for the icon creation process and get the
## full path to it:
path_workingdir = create_working_directory()
## Copy the file from which the icon is to be created into the
## working directory:
try:
basename_source_file = \
copy_file_to_directory(options["input-file"], path_workingdir)
except IOError as err:
## Unable to copy the source file to the working directory.
msg = "Icon creation failed: unable to copy the source image file " \
"to the working directory. Got this error: [%s]" % str(err)
raise InvenioWebSubmitIconCreatorError(msg)
## Create the icon and get its name:
icon_name = build_icon(path_workingdir, \
basename_source_file, \
subject_filetype, \
options["icon-name"], \
options["icon-file-format"], \
options["multipage-icon"], \
options["multipage-icon-delay"], \
options["icon-scale"])
## Return a tuple containing the working directory and the name of the
## icon file to the caller:
return (path_workingdir, icon_name)
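# Hedged usage sketch for create_icon(); the option values below are
# illustrative only (note: "icon-scale" is passed as a string so that it
# satisfies the ImageMagick scale regex check above):
#
#   options = {'input-file'           : '/tmp/demo-picture-file.jpg',
#              'icon-name'            : 'icon-demo-picture-file',
#              'icon-file-format'     : 'gif',
#              'multipage-icon'       : False,
#              'multipage-icon-delay' : 100,
#              'icon-scale'           : '180',
#              'verbosity'            : 0}
#   (path_workingdir, icon_name) = create_icon(options)
#   # the created icon now lives at "%s/%s" % (path_workingdir, icon_name)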
# ***** Functions Specific to CLI calling of the program: *****
def usage(wmsg="", err_code=0):
"""Print a "usage" message (along with an optional additional warning/error
message) to stderr and exit with a given error code.
@param wmsg: (string) - some kind of warning message for the user.
@param err_code: (integer) - an error code to be passed to sys.exit,
which is called after the usage message has been printed.
@return: None.
"""
## Wash the warning message:
if wmsg != "":
wmsg = wmsg.strip() + "\n"
## The usage message:
msg = """ Usage:
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
[options] input-file.jpg
websubmit_icon_creator.py is used to create an icon for an image.
Options:
-h, --help Print this help.
-V, --version Print version information.
-v, --verbosity=LEVEL Verbosity level (0=min, 1=default, 9=max).
[NOT IMPLEMENTED]
-s, --icon-scale
Scaling information for the icon that is to
be created. Must be an integer. Defaults to
180.
-m, --multipage-icon
A flag to indicate that the icon should
consist of multiple pages. Will only be
respected if the requested icon type is GIF
and the input file is a PS or PDF consisting
of several pages.
-d, --multipage-icon-delay=VAL
If the icon consists of several pages and is
an animated GIF, a delay between frames can
be specified. Must be an integer. Defaults
to 100.
-f, --icon-file-format=FORMAT
The file format of the icon to be created.
Must be one of:
[pdf, gif, jpg, jpeg, ps, png, bmp]
Defaults to gif.
-o, --icon-name=XYZ
The optional name to be given to the created
icon file. If this is omitted, the icon file
will be given the same name as the input
file, but will be prefixed by "icon-";
Examples:
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
--icon-scale=200 \\
--icon-name=test-icon \\
--icon-file-format=jpg \\
test-image.jpg
python ~invenio/lib/python/invenio/websubmit_icon_creator.py \\
--icon-scale=200 \\
--icon-name=test-icon2 \\
--icon-file-format=gif \\
--multipage-icon \\
--multipage-icon-delay=50 \\
test-image2.pdf
"""
sys.stderr.write(wmsg + msg)
sys.exit(err_code)
def get_cli_options():
"""From the options and arguments supplied by the user via the CLI,
build a dictionary of options to drive websubmit-icon-creator.
For reference, the CLI options available to the user are as follows:
-h, --help -> Display help/usage message and exit;
-V, --version -> Display version information and exit;
-v, --verbosity= -> Set verbosity level (0=min, 1=default,
9=max).
-s, --icon-scale -> Scaling information for the icon that
is to be created. Must be of
type 'geometry', as understood
by ImageMagick (Eg. 320 or
320x240 or 100>). Defaults to
180.
-m, --multipage-icon -> A flag to indicate that the icon should
consist of multiple pages. Will only be
respected if the requested icon type is
GIF and the input file is a PS or PDF
consisting of several pages.
-d, --multipage-icon-delay= -> If the icon consists of several pages
and is an animated GIF, a delay between
frames can be specified. Must be an
integer. Defaults to 100.
-f, --icon-file-format= -> The file format of the icon to be
created. Must be one of:
[pdf, gif, jpg, jpeg, ps, png, bmp]
Defaults to gif.
-o, --icon-name= -> The optional name to be given to the
created icon file. If this is omitted,
the icon file will be given the same
name as the input file, but will be
prefixed by "icon-";
@return: (dictionary) of input options and flags, set as
appropriate. The dictionary has the following structure:
+ input-file: (string) - the path to the input file (i.e. that
which is to be stamped;
+ icon-name: (string) - the name of the icon that is to be created
by the program. This is optional - if not provided,
a default name will be applied to the icon file instead;
+ multipage-icon: (boolean) - used only when the original file
is a PDF or PS file. If False, the created icon will feature ONLY
the first page of the PDF. If True, ALL pages of the PDF will
be included in the created icon. Note: If the icon type is not
gif, this flag will be forced as False.
+ multipage-icon-delay: (integer) - used only when the original
file is a PDF or PS AND use-first-page-only is False AND
the icon type is gif.
This allows the user to specify the delay between "pages"
of a multi-page (animated) icon.
+ icon-scale: (integer) - the scaling information to be used for the
creation of the new icon.
+ icon-file-format: (string) - the file format of the icon that is
to be created. Legal values are:
[pdf, gif, jpg, jpeg, ps, png, bmp]
+ verbosity: (integer) - the verbosity level under which the program
is to run;
So, an example of the returned dictionary could be something like:
{ 'input-file' : "demo-picture-file.jpg",
'icon-name' : "icon-demo-picture-file",
'icon-file-format' : "gif",
'multipage-icon' : True,
'multipage-icon-delay' : 100,
'icon-scale' : 180,
'verbosity' : 0,
}
"""
## dictionary of important values relating to cli call of program:
options = { 'input-file' : "",
'icon-name' : "",
'icon-file-format' : "",
'multipage-icon' : False,
'multipage-icon-delay' : 100,
'icon-scale' : 180,
'verbosity' : 0,
}
## Get the options and arguments provided by the user via the CLI:
try:
myoptions, myargs = getopt.getopt(sys.argv[1:], "hVv:s:md:f:o:", \
["help",
"version",
"verbosity=",
"icon-scale=",
"multipage-icon",
"multipage-icon-delay=",
"icon-file-format=",
"icon-name="])
except getopt.GetoptError as err:
## Invalid option provided - usage message
usage(wmsg="Error: %(msg)s." % { 'msg' : str(err) })
## Get the input file from the arguments list (it should be the
## first argument):
if len(myargs) > 0:
options["input-file"] = myargs[0]
## Extract the details of the options:
for opt in myoptions:
if opt[0] in ("-V","--version"):
## version message and exit
sys.stdout.write("%s\n" % __revision__)
sys.stdout.flush()
sys.exit(0)
elif opt[0] in ("-h","--help"):
## help message and exit
usage()
elif opt[0] in ("-v", "--verbosity"):
## Get verbosity level:
if not opt[1].isdigit():
options['verbosity'] = 0
elif int(opt[1]) not in xrange(0, 10):
options['verbosity'] = 0
else:
options['verbosity'] = int(opt[1])
elif opt[0] in ("-o", "--icon-name"):
## Get the name of the icon that is to be created:
options["icon-name"] = opt[1]
elif opt[0] in ("-f", "--icon-file-format"):
## The file format of the icon file:
if str(opt[1]).lower() not in CFG_ALLOWED_FILE_EXTENSIONS:
## Illegal file format requested for icon:
usage()
else:
## Use the provided (validated) icon file format:
options["icon-file-format"] = str(opt[1]).lower()
elif opt[0] in ("-m","--multipage-icon"):
## The user would like a multipage (animated) icon:
options['multipage-icon'] = True
elif opt[0] in ("-d", "--multipage-icon-delay"):
## The delay to be used in the case of a multipage (animated) icon:
try:
frame_delay = int(opt[1])
except ValueError:
## Invalid value for delay supplied. Usage message.
usage()
else:
if frame_delay >= 0:
options['multipage-icon-delay'] = frame_delay
elif opt[0] in ("-s", "--icon-scale"):
## The scaling information for the icon:
if re_imagemagic_scale_parameter_format.match(opt[1]):
options['icon-scale'] = opt[1]
else:
usage()
##
## Done. Return the dictionary of options:
return options
def create_icon_cli():
"""The function responsible for triggering the icon creation process when
called via the CLI.
This function will effectively get the CLI options, then pass them to
function that is responsible for coordinating the icon creation process
itself.
Once stamping has been completed, an attempt will be made to copy the
icon file to the current working directory. If this can't be done, the
path to the icon will be printed to stdout instead.
"""
## Get CLI options and arguments:
input_options = get_cli_options()
## Create the icon file and obtain the name of the working directory in
## which the icon file is situated and the name of the icon file:
try:
(working_dir, icon_file) = create_icon(input_options)
except InvenioWebSubmitIconCreatorError as err:
## Something went wrong:
sys.stderr.write("Icon creation failed: [%s]\n" % str(err))
sys.stderr.flush()
sys.exit(1)
if not os.access("./%s" % icon_file, os.F_OK):
## Copy the icon file into the current directory:
try:
shutil.copyfile("%s/%s" % (working_dir, icon_file), \
"./%s" % icon_file)
except IOError:
## Report that it wasn't possible to copy the icon file locally
## and offer the user a path to it:
msg = "It was not possible to copy the icon file to the " \
"current working directory.\nYou can find it here: " \
"[%s/%s].\n" \
% (working_dir, icon_file)
sys.stderr.write(msg)
sys.stderr.flush()
else:
## A file exists in curdir with the same name as the final icon file.
## Just print out a message stating this fact, along with the path to
## the icon file in the temporary working directory:
msg = "The icon file [%s] has not been copied to the current " \
"working directory because a file with this name already " \
"existed there.\nYou can find the icon file here: " \
"[%s/%s].\n" % (icon_file, working_dir, icon_file)
sys.stderr.write(msg)
sys.stderr.flush()
# Start proceedings for CLI calls:
if __name__ == "__main__":
create_icon_cli()
| gpl-2.0 |
SnappleCap/oh-mainline | vendor/packages/celery/celery/tests/test_app/test_routes.py | 18 | 5437 | from __future__ import absolute_import
from __future__ import with_statement
from functools import wraps
from celery import routes
from celery import current_app
from celery.exceptions import QueueNotFound
from celery.utils import maybe_promise
from celery.tests.utils import unittest
def E(queues):
def expand(answer):
return routes.Router([], queues).expand_destination(answer)
return expand
def with_queues(**queues):
def patch_fun(fun):
@wraps(fun)
def __inner(*args, **kwargs):
app = current_app
prev_queues = app.conf.CELERY_QUEUES
prev_Queues = app.amqp.queues
app.conf.CELERY_QUEUES = queues
app.amqp.queues = app.amqp.Queues(queues)
try:
return fun(*args, **kwargs)
finally:
app.conf.CELERY_QUEUES = prev_queues
app.amqp.queues = prev_Queues
return __inner
return patch_fun
a_queue = {"exchange": "fooexchange",
"exchange_type": "fanout",
"binding_key": "xuzzy"}
b_queue = {"exchange": "barexchange",
"exchange_type": "topic",
"binding_key": "b.b.#"}
d_queue = {"exchange": current_app.conf.CELERY_DEFAULT_EXCHANGE,
"exchange_type": current_app.conf.CELERY_DEFAULT_EXCHANGE_TYPE,
"routing_key": current_app.conf.CELERY_DEFAULT_ROUTING_KEY}
class test_MapRoute(unittest.TestCase):
@with_queues(foo=a_queue, bar=b_queue)
def test_route_for_task_expanded_route(self):
expand = E(current_app.conf.CELERY_QUEUES)
route = routes.MapRoute({"celery.ping": {"queue": "foo"}})
self.assertDictContainsSubset(a_queue,
expand(route.route_for_task("celery.ping")))
self.assertIsNone(route.route_for_task("celery.awesome"))
@with_queues(foo=a_queue, bar=b_queue)
def test_route_for_task(self):
expand = E(current_app.conf.CELERY_QUEUES)
route = routes.MapRoute({"celery.ping": b_queue})
self.assertDictContainsSubset(b_queue,
expand(route.route_for_task("celery.ping")))
self.assertIsNone(route.route_for_task("celery.awesome"))
def test_expand_route_not_found(self):
expand = E(current_app.conf.CELERY_QUEUES)
route = routes.MapRoute({"a": {"queue": "x"}})
with self.assertRaises(QueueNotFound):
expand(route.route_for_task("a"))
class test_lookup_route(unittest.TestCase):
def test_init_queues(self):
router = routes.Router(queues=None)
self.assertDictEqual(router.queues, {})
@with_queues(foo=a_queue, bar=b_queue)
def test_lookup_takes_first(self):
R = routes.prepare(({"celery.ping": {"queue": "bar"}},
{"celery.ping": {"queue": "foo"}}))
router = routes.Router(R, current_app.conf.CELERY_QUEUES)
self.assertDictContainsSubset(b_queue,
router.route({}, "celery.ping",
args=[1, 2], kwargs={}))
@with_queues()
def test_expands_queue_in_options(self):
R = routes.prepare(())
router = routes.Router(R, current_app.conf.CELERY_QUEUES,
create_missing=True)
# apply_async forwards all arguments, even exchange=None etc,
# so need to make sure it's merged correctly.
route = router.route({"queue": "testq",
"exchange": None,
"routing_key": None,
"immediate": False},
"celery.ping",
args=[1, 2], kwargs={})
self.assertDictContainsSubset({"exchange": "testq",
"routing_key": "testq",
"immediate": False},
route)
self.assertIn("queue", route)
@with_queues(foo=a_queue, bar=b_queue)
def test_expand_destination_string(self):
x = routes.Router({}, current_app.conf.CELERY_QUEUES)
dest = x.expand_destination("foo")
self.assertEqual(dest["exchange"], "fooexchange")
@with_queues(foo=a_queue, bar=b_queue, **{
current_app.conf.CELERY_DEFAULT_QUEUE: d_queue})
def test_lookup_paths_traversed(self):
R = routes.prepare(({"celery.xaza": {"queue": "bar"}},
{"celery.ping": {"queue": "foo"}}))
router = routes.Router(R, current_app.amqp.queues)
self.assertDictContainsSubset(a_queue,
router.route({}, "celery.ping",
args=[1, 2], kwargs={}))
self.assertEqual(router.route({}, "celery.poza"),
dict(d_queue, queue=current_app.conf.CELERY_DEFAULT_QUEUE))
class test_prepare(unittest.TestCase):
def test_prepare(self):
from celery.datastructures import LRUCache
o = object()
R = [{"foo": "bar"},
"celery.datastructures.LRUCache",
o]
p = routes.prepare(R)
self.assertIsInstance(p[0], routes.MapRoute)
self.assertIsInstance(maybe_promise(p[1]), LRUCache)
self.assertIs(p[2], o)
self.assertEqual(routes.prepare(o), [o])
def test_prepare_item_is_dict(self):
R = {"foo": "bar"}
p = routes.prepare(R)
self.assertIsInstance(p[0], routes.MapRoute)
| agpl-3.0 |
aerickson/ansible | lib/ansible/modules/cloud/amazon/cloudtrail.py | 27 | 8794 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cloudtrail
short_description: manage CloudTrail creation and deletion
description:
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- "Ansible Core Team"
- "Ted Timmons"
requirements:
- "boto >= 2.21"
options:
state:
description:
- add or remove CloudTrail configuration.
required: true
choices: ['enabled', 'disabled']
name:
description:
- name for given CloudTrail configuration.
- This is a primary key and is used to identify the configuration.
s3_bucket_name:
description:
- bucket to place CloudTrail in.
- this bucket should exist and have the proper policy.
See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- required when state=enabled.
required: false
s3_key_prefix:
description:
- prefix to keys in bucket. A trailing slash is not necessary and will be removed.
required: false
include_global_events:
description:
- record API calls from global services such as IAM and STS?
required: false
default: false
choices: ["true", "false"]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
version_added: "1.5"
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
version_added: "1.5"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
extends_documentation_fragment: aws
"""
EXAMPLES = """
- name: enable cloudtrail
local_action:
module: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: enable cloudtrail with different configuration
local_action:
module: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket2
s3_key_prefix: ''
region: us-east-1
- name: remove cloudtrail
local_action:
module: cloudtrail
state: disabled
name: main
region: us-east-1
"""
HAS_BOTO = False
try:
import boto
import boto.cloudtrail
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds
class CloudTrailManager:
"""Handles cloudtrail configuration"""
def __init__(self, module, region=None, **aws_connect_params):
self.module = module
self.region = region
self.aws_connect_params = aws_connect_params
self.changed = False
try:
self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
self.module.fail_json(msg=str(e))
def view_status(self, name):
return self.conn.get_trail_status(name)
def view(self, name):
ret = self.conn.describe_trails(trail_name_list=[name])
trailList = ret.get('trailList', [])
if len(trailList) == 1:
return trailList[0]
return None
def exists(self, name=None):
ret = self.view(name)
if ret:
return True
return False
def enable_logging(self, name):
'''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
self.conn.start_logging(name)
def enable(self, **create_args):
return self.conn.create_trail(**create_args)
def update(self, **create_args):
return self.conn.update_trail(**create_args)
def delete(self, name):
'''Delete a given cloudtrial configuration. Throws Exception on error.'''
self.conn.delete_trail(name)
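# Hedged sketch of driving CloudTrailManager directly, outside of Ansible;
# the region and credential values are illustrative stand-ins:
#
#   manager = CloudTrailManager(module, region='us-east-1',
#                               aws_access_key_id='AKIA...',
#                               aws_secret_access_key='...')
#   if not manager.exists(name='main'):
#       manager.enable(name='main', s3_bucket_name='ourbucket',
#                      s3_key_prefix='cloudtrail',
#                      include_global_service_events=True)
#   manager.enable_logging('main')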
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['enabled', 'disabled']},
name={'required': True, 'type': 'str'},
s3_bucket_name={'required': False, 'type': 'str'},
s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
include_global_events={'default': True, 'required': False, 'type': 'bool'},
))
required_together = [['state', 's3_bucket_name']]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
ec2_url, access_key, secret_key, region = get_ec2_creds(module)
aws_connect_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ct_name = module.params['name']
s3_bucket_name = module.params['s3_bucket_name']
# remove trailing slash from the key prefix, really messes up the key structure.
s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
include_global_events = module.params['include_global_events']
cf_man = CloudTrailManager(module, region=region, **aws_connect_params)
results = { 'changed': False }
if module.params['state'] == 'enabled':
results['exists'] = cf_man.exists(name=ct_name)
if results['exists']:
results['view'] = cf_man.view(ct_name)
# only update if the values have changed.
if results['view']['S3BucketName'] != s3_bucket_name or \
results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
results['view']['IncludeGlobalServiceEvents'] != include_global_events:
if not module.check_mode:
results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
include_global_service_events=include_global_events)
results['changed'] = True
else:
if not module.check_mode:
# doesn't exist. create it.
results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
include_global_service_events=include_global_events)
results['changed'] = True
# given cloudtrail should exist now. Enable the logging.
results['view_status'] = cf_man.view_status(ct_name)
results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
if not results['was_logging_enabled']:
if not module.check_mode:
cf_man.enable_logging(ct_name)
results['logging_enabled'] = True
results['changed'] = True
# delete the cloudtrail configuration
elif module.params['state'] == 'disabled':
# check to see if it exists before deleting.
results['exists'] = cf_man.exists(name=ct_name)
if results['exists']:
# it exists, so we should delete it and mark changed.
if not module.check_mode:
cf_man.delete(ct_name)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
CeltonMcGrath/TACTIC | 3rd_party/CherryPy/cherrypy/test/sessiondemo.py | 7 | 5376 | #!/usr/bin/python
"""A session demonstration app."""
import calendar
from datetime import datetime
import sys
import cherrypy
from cherrypy.lib import sessions
page = """
<html>
<head>
<style type='text/css'>
table { border-collapse: collapse; border: 1px solid #663333; }
th { text-align: right; background-color: #663333; color: white; padding: 0.5em; }
td { white-space: pre-wrap; font-family: monospace; padding: 0.5em;
border: 1px solid #663333; }
.warn { font-family: serif; color: #990000; }
</style>
<script type="text/javascript">
<!--
function twodigit(d) { return d < 10 ? "0" + d : d; }
function formattime(t) {
var month = t.getUTCMonth() + 1;
var day = t.getUTCDate();
var year = t.getUTCFullYear();
var hours = t.getUTCHours();
var minutes = t.getUTCMinutes();
return (year + "/" + twodigit(month) + "/" + twodigit(day) + " " +
hours + ":" + twodigit(minutes) + " UTC");
}
function interval(s) {
// Return the given interval (in seconds) as an English phrase
var seconds = s %% 60;
s = Math.floor(s / 60);
var minutes = s %% 60;
s = Math.floor(s / 60);
var hours = s %% 24;
var v = twodigit(hours) + ":" + twodigit(minutes) + ":" + twodigit(seconds);
var days = Math.floor(s / 24);
if (days != 0) v = days + ' days, ' + v;
return v;
}
var fudge_seconds = 5;
function init() {
// Set the content of the 'btime' cell.
var currentTime = new Date();
var bunixtime = Math.floor(currentTime.getTime() / 1000);
var v = formattime(currentTime);
v += " (Unix time: " + bunixtime + ")";
var diff = Math.abs(%(serverunixtime)s - bunixtime);
if (diff > fudge_seconds) v += "<p class='warn'>Browser and Server times disagree.</p>";
document.getElementById('btime').innerHTML = v;
// Warn if response cookie expires is not close to one hour in the future.
// Yes, we want this to happen when we hit the 'Expire' link, too.
var expires = Date.parse("%(expires)s") / 1000;
var onehour = (60 * 60);
if (Math.abs(expires - (bunixtime + onehour)) > fudge_seconds) {
diff = Math.floor(expires - bunixtime);
if (expires > (bunixtime + onehour)) {
var msg = "Response cookie 'expires' date is " + interval(diff) + " in the future.";
} else {
var msg = "Response cookie 'expires' date is " + interval(0 - diff) + " in the past.";
}
document.getElementById('respcookiewarn').innerHTML = msg;
}
}
//-->
</script>
</head>
<body onload='init()'>
<h2>Session Demo</h2>
<p>Reload this page. The session ID should not change from one reload to the next.</p>
<p><a href='../'>Index</a> | <a href='expire'>Expire</a> | <a href='regen'>Regenerate</a></p>
<table>
<tr><th>Session ID:</th><td>%(sessionid)s<p class='warn'>%(changemsg)s</p></td></tr>
<tr><th>Request Cookie</th><td>%(reqcookie)s</td></tr>
<tr><th>Response Cookie</th><td>%(respcookie)s<p id='respcookiewarn' class='warn'></p></td></tr>
<tr><th>Session Data</th><td>%(sessiondata)s</td></tr>
<tr><th>Server Time</th><td id='stime'>%(servertime)s (Unix time: %(serverunixtime)s)</td></tr>
<tr><th>Browser Time</th><td id='btime'> </td></tr>
<tr><th>Cherrypy Version:</th><td>%(cpversion)s</td></tr>
<tr><th>Python Version:</th><td>%(pyversion)s</td></tr>
</table>
</body></html>
"""
class Root(object):
def page(self):
changemsg = []
if cherrypy.session.id != cherrypy.session.originalid:
if cherrypy.session.originalid is None:
changemsg.append('Created new session because no session id was given.')
if cherrypy.session.missing:
changemsg.append('Created new session due to missing (expired or malicious) session.')
if cherrypy.session.regenerated:
changemsg.append('Application generated a new session.')
try:
expires = cherrypy.response.cookie['session_id']['expires']
except KeyError:
expires = ''
return page % {
'sessionid': cherrypy.session.id,
'changemsg': '<br>'.join(changemsg),
'respcookie': cherrypy.response.cookie.output(),
'reqcookie': cherrypy.request.cookie.output(),
'sessiondata': cherrypy.session.items(),
'servertime': datetime.utcnow().strftime("%Y/%m/%d %H:%M") + " UTC",
'serverunixtime': calendar.timegm(datetime.utcnow().timetuple()),
'cpversion': cherrypy.__version__,
'pyversion': sys.version,
'expires': expires,
}
def index(self):
# Must modify data or the session will not be saved.
cherrypy.session['color'] = 'green'
return self.page()
index.exposed = True
def expire(self):
sessions.expire()
return self.page()
expire.exposed = True
def regen(self):
cherrypy.session.regenerate()
# Must modify data or the session will not be saved.
cherrypy.session['color'] = 'yellow'
return self.page()
regen.exposed = True
if __name__ == '__main__':
cherrypy.config.update({
#'environment': 'production',
'log.screen': True,
'tools.sessions.on': True,
})
cherrypy.quickstart(Root())
| epl-1.0 |
pschmitt/home-assistant | homeassistant/components/home_connect/entity.py | 21 | 1999 | """Home Connect entity base class."""
import logging
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .api import HomeConnectDevice
from .const import DOMAIN, SIGNAL_UPDATE_ENTITIES
_LOGGER = logging.getLogger(__name__)
class HomeConnectEntity(Entity):
"""Generic Home Connect entity (base class)."""
def __init__(self, device: HomeConnectDevice, desc: str) -> None:
"""Initialize the entity."""
self.device = device
self.desc = desc
self._name = f"{self.device.appliance.name} {desc}"
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ENTITIES, self._update_callback
)
)
@callback
def _update_callback(self, ha_id):
"""Update data."""
if ha_id == self.device.appliance.haId:
self.async_entity_update()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the node (used for Entity_ID)."""
return self._name
@property
def unique_id(self):
"""Return the unique id base on the id returned by Home Connect and the entity name."""
return f"{self.device.appliance.haId}-{self.desc}"
@property
def device_info(self):
"""Return info about the device."""
return {
"identifiers": {(DOMAIN, self.device.appliance.haId)},
"name": self.device.appliance.name,
"manufacturer": self.device.appliance.brand,
"model": self.device.appliance.vib,
}
@callback
def async_entity_update(self):
"""Update the entity."""
_LOGGER.debug("Entity update triggered on %s", self)
self.async_schedule_update_ha_state(True)
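# A hedged sketch of how this base class is typically subclassed; the "Door"
# description and the BinarySensorEntity mix-in are illustrative assumptions,
# not part of this module:
#
#   from homeassistant.components.binary_sensor import BinarySensorEntity
#
#   class HomeConnectDoorSensor(HomeConnectEntity, BinarySensorEntity):
#       """Binary sensor for a Home Connect appliance door."""
#
#       def __init__(self, device):
#           super().__init__(device, "Door")
#           self._state = None
#
#       @property
#       def is_on(self):
#           return self._state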
| apache-2.0 |
rizar/groundhog-private | doc/conf.py | 28 | 6225 | # -*- coding: utf-8 -*-
#
# Groundhog documentation build configuration file
# It is based on the Theano documentation build
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'numpydoc']#, 'ext']
# Needed; otherwise numpydoc triggers many autosummary errors:
#https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
todo_include_todos = True
try:
from sphinx.ext import pngmath
extensions.append('sphinx.ext.pngmath')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Groundhog'
copyright = '2013, LISA lab'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = 'dev'
# The full version, including alpha/beta/rc tags.
release = 'dev'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = ['images', 'scripts', 'sandbox']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'default.css'
html_theme = 'sphinxdoc'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'images/theano_logo-200x67.png'
#html_logo = 'images/theano_logo_allblue_200x46.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [] # '.static', 'images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'theanodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'groundhog.tex', 'GroundHog Documentation',
'LISA lab, University of Montreal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'images/snake_theta2-trans.png'
latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bsd-3-clause |
sumedh123/debatify | venv/lib/python2.7/site-packages/pip/vcs/bazaar.py | 514 | 3803 | from __future__ import absolute_import
import logging
import os
import tempfile
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme: after removing bzr+ from bzr+ssh://, re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
def check_version(self, dest, rev_options):
"""Always assume the versions don't match"""
return False
vcs.register(Bazaar)
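# Usage sketch (hypothetical package name): once registered, pip handles
# requirements such as
#   bzr+ssh://example.com/repo/trunk#egg=somepkg
#   bzr+lp:somepkg#egg=somepkg
# via this class; see get_src_requirement() for the URL format it emits.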
| mit |
UWNetworksLab/metasync | metasync/mapping.py | 1 | 7526 |
from params import *
import dbg, util
class DetMapInfo2:
def __init__(self, config, hspace, replica, version):
self.config = config
self.replica = replica
self.hspace = hspace
self.version = version
self._load(config)
def _load(self, config):
nspace = []
for n, size in config:
for s in range(size):
nspace.append((n,s))
assert len(nspace) < self.hspace
self.detmap = [None] * self.hspace
for i in range(self.hspace):
group = []
for n in nspace:
order = int(util.md5("%s.%s" % (i, n)))
group.append((n[0], order))
self.detmap[i] = sorted(group, key=lambda e:e[1])
def pack(self):
for i in range(self.hspace):
lst = []
prev = -1
for j in self.detmap[i]:
            if j[0] != prev:
                lst.append((j[0],0))
                prev = j[0]
self.detmap[i] = lst
def uniq(lst, n):
rtn = []
for (i, _) in lst:
if i in rtn:
continue
rtn.append(i)
if len(rtn) == n:
break
return rtn
class DetMap2:
# goal
# 1. non-uniformly locating blobs, approximately reflecting storage size of each node
# 2. minimize realigning on a node failure
# design
# 0. node -> (node, storage)
# (e.g., (1,0), (1,1) if node 1 has 2G storage)
# 1. fixed hspace, where h(blob) % hspace = index
# (hspace any large number, NOT depending on len(nodes))
# 2. detmap[index] -> a group of nodes
# (a group includes all nodes, but different order see 3)
# 3. order nodes in a group, by hash(index, node)
# (so deterministic)
# 4. in each group, pick first #replication nodes
# failure
# node change
# replication
    # => in all of the above situations, only blobs on the old/new node
    #    will be re-balanced
#
def __init__(self, config, hspace=1024, replica=2, version=0):
self.version = version
self.mapinfo = {}
self.hspace = hspace
self.mapinfo[version] = DetMapInfo2(config, hspace, replica, version)
def reconfig(self, config, replica=2):
self.version += 1
self.mapinfo[self.version] \
= DetMapInfo2(config, self.hspace, replica, self.version)
def pack(self):
self.mapinfo[self.version].pack()
def get_remapping(self, hvs):
assert self.version > 0
added = {}
removed = {}
for srv, sz in (self.mapinfo[self.version].config + self.mapinfo[self.version-1].config):
added[srv] = []
removed[srv] = []
for hv in hvs:
old_map = self.get_mapping(hv, self.version - 1)
new_map = self.get_mapping(hv, self.version)
for srv in list(set(new_map) - set(old_map)):
added[srv].append(hv)
for srv in list(set(old_map) - set(new_map)):
removed[srv].append(hv)
return added,removed
# get mapping info of hash value (on the latest version)
def get_mapping(self, hv, version=None):
# latest version by default
if version is None:
version = self.version
# bigbang moment
if version < 0:
version = 0
if type(hv) is str:
hv = int(hv, 16)
ver_modulo = self.mapinfo[version].hspace
ver_replica = self.mapinfo[version].replica
i = hv % ver_modulo
ver_detmap = self.mapinfo[version].detmap[i]
return uniq(ver_detmap, ver_replica)
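# Usage sketch for DetMap2 (hypothetical config: node 1 with 2 storage units,
# node 2 with 5):
#   m = DetMap2([(1, 2), (2, 5)], hspace=1024, replica=2)
#   nodes = m.get_mapping("deadbeef")       # deterministic list of 2 node ids
#   m.reconfig([(1, 2), (2, 5), (3, 2)])    # add node 3
#   added, removed = m.get_remapping(["deadbeef"])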
class DetMapInfo:
def __init__(self, config, replica, version):
self.config = config
self.replica = replica
self.version = version
        self.distrib = map_to_distrib(config)
self.modulo = len(self.distrib)
def store(self):
return "%s:%s:%s" % (self.version, self.replica, self.config)
@classmethod
def restore(cls, store):
# poor man's marshaling
(version, replica, config) = store.split(":")
return DetMapInfo(eval(config), int(replica), int(version))
class DetMap:
#
# interesting aspects/requirements of our settings
#
# - heterogeneous nodes: different storage size
# - quick recovery (from local copy)
# - mininum info to keep the mapping and its changes
# - configuration changes (superset of node failure)
# - role of gc for balancing
#
# config: [(1, 2GB), (2, 5GB), (3, 2GB)]
def __init__(self, config, replica=2, version=0):
# normalize config
config.sort(key=lambda t:t[0])
self.version = version
self.mapinfo = {}
self.mapinfo[version] = DetMapInfo(config, replica, version)
@property
def replica(self):
return self.mapinfo[self.version].replica
@property
def config(self):
return self.mapinfo[self.version].config
@property
def distrib(self):
return self.mapinfo[self.version].distrib
@property
def modulo(self):
return self.mapinfo[self.version].modulo
def reconfig(self, config, replica=2):
# NOTE. do not support replica changes yet
assert replica == self.replica
self.version += 1
self.mapinfo[self.version] \
= DetMapInfo(config, replica, self.version)
# get mapping info of hash value (on the latest version)
def get_mapping(self, hv, version=None):
# latest version by default
if version is None:
version = self.version
# bigbang moment
if version < 0:
version = 0
if type(hv) is str:
hv = int(hv, 16)
ver_modulo = self.mapinfo[version].modulo
ver_replica = self.mapinfo[version].replica
ver_distrib = self.mapinfo[version].distrib
i = hv % ver_modulo
m = []
while len(m) != ver_replica:
v = ver_distrib[i]
if v not in m:
m.append(v)
i = (i + 1) % ver_modulo
return m
# get re-mapping info of hash value (against the previous one)
def get_remapping(self, hv):
old_map = self.get_mapping(hv, self.version - 1)
new_map = self.get_mapping(hv, self.version)
# rebalance missing blob
return list(set(new_map) - set(old_map))
def store(self):
ret = []
for (ver, info) in self.mapinfo.iteritems():
ret.append(info.store())
return "\n".join(ret)
@classmethod
def restore(cls, store):
mapinfo = {}
for l in store.splitlines():
info = DetMapInfo.restore(l)
mapinfo[info.version] = info
version = max(mapinfo.keys())
config = mapinfo[version].config
replica = mapinfo[version].replica
m = DetMap(config, replica, version)
m.mapinfo = mapinfo
return m
def __str__(self):
return "map:%s@%s" % (self.config, self.version)
# config: [(1, 2GB), (2, 5GB), (3, 2GB)]
# -> (1, 2, 3, 1, 2, 3, 2, 2, 2)
def normalized(config):
return [(id, size*100//GB) for (id, size) in config]
def map_to_distrib(config):
q = [[id] * cap for (id, cap) in normalized(config)]
m = []
i = 0
while len(q) != 0:
i %= len(q)
m.append(q[i].pop())
if len(q[i]) == 0:
del q[i]
continue
i += 1
return tuple(m) | mit |
odootr/odoo | addons/account_followup/wizard/account_followup_print.py | 217 | 16379 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_followup_stat_by_partner(osv.osv):
_name = "account_followup.stat.by.partner"
_description = "Follow-up Statistics by Partner"
_rec_name = 'partner_id'
_auto = False
def _get_invoice_partner_id(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
result[rec.id] = rec.partner_id.address_get(adr_pref=['invoice']).get('invoice', rec.partner_id.id)
return result
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'date_move':fields.date('First move', readonly=True),
'date_move_last':fields.date('Last move', readonly=True),
'date_followup':fields.date('Latest follow-up', readonly=True),
'max_followup_id': fields.many2one('account_followup.followup.line',
'Max Follow Up Level', readonly=True, ondelete="cascade"),
'balance':fields.float('Balance', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'invoice_partner_id': fields.function(_get_invoice_partner_id, type='many2one', relation='res.partner', string='Invoice Address')
}
_depends = {
'account.move.line': [
'account_id', 'company_id', 'credit', 'date', 'debit',
'followup_date', 'followup_line_id', 'partner_id', 'reconcile_id',
],
'account.account': ['active', 'type'],
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'account_followup_stat_by_partner')
        # Here we have no choice but to create a virtual ID based on the concatenation
        # of the partner_id and the company_id, because if a partner is shared between 2 companies,
        # we want to see 2 lines for him in this table. It means that both companies should be able
        # to send him follow-ups separately. We assume that the number of companies will not
        # reach 10 000 records, which should be enough for the time being.
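        # e.g. partner_id=7 and company_id=3 give the virtual id
        # 7 * 10000 + 3 = 70003 (see the SELECT below).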
cr.execute("""
create view account_followup_stat_by_partner as (
SELECT
l.partner_id * 10000::bigint + l.company_id as id,
l.partner_id AS partner_id,
min(l.date) AS date_move,
max(l.date) AS date_move_last,
max(l.followup_date) AS date_followup,
max(l.followup_line_id) AS max_followup_id,
sum(l.debit - l.credit) AS balance,
l.company_id as company_id
FROM
account_move_line l
LEFT JOIN account_account a ON (l.account_id = a.id)
WHERE
a.active AND
a.type = 'receivable' AND
l.reconcile_id is NULL AND
l.partner_id IS NOT NULL
GROUP BY
l.partner_id, l.company_id
)""")
class account_followup_sending_results(osv.osv_memory):
def do_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
return context.get('report_data')
def do_done(self, cr, uid, ids, context=None):
return {}
def _get_description(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('description')
def _get_need_printing(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('needprinting')
_name = 'account_followup.sending.results'
_description = 'Results from the sending of the different letters and emails'
_columns = {
'description': fields.text("Description", readonly=True),
'needprinting': fields.boolean("Needs Printing")
}
_defaults = {
'needprinting':_get_need_printing,
'description':_get_description,
}
class account_followup_print(osv.osv_memory):
_name = 'account_followup.print'
_description = 'Print Follow-up & Send Mail to Customers'
_columns = {
'date': fields.date('Follow-up Sending Date', required=True,
help="This field allow you to select a forecast date to plan your follow-ups"),
'followup_id': fields.many2one('account_followup.followup', 'Follow-Up', required=True, readonly = True),
'partner_ids': fields.many2many('account_followup.stat.by.partner', 'partner_stat_rel',
'osv_memory_id', 'partner_id', 'Partners', required=True),
'company_id':fields.related('followup_id', 'company_id', type='many2one',
relation='res.company', store=True, readonly=True),
'email_conf': fields.boolean('Send Email Confirmation'),
'email_subject': fields.char('Email Subject', size=64),
'partner_lang': fields.boolean('Send Email in Partner Language',
help='Do not change message text, if you want to send email in partner language, or configure from company'),
'email_body': fields.text('Email Body'),
'summary': fields.text('Summary', readonly=True),
'test_print': fields.boolean('Test Print',
help='Check if you want to print follow-ups without changing follow-up level.'),
}
def _get_followup(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('active_model', 'ir.ui.menu') == 'account_followup.followup':
return context.get('active_id', False)
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
followp_id = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
return followp_id and followp_id[0] or False
def process_partners(self, cr, uid, partner_ids, data, context=None):
partner_obj = self.pool.get('res.partner')
partner_ids_to_print = []
nbmanuals = 0
manuals = {}
nbmails = 0
nbunknownmails = 0
nbprints = 0
resulttext = " "
for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_ids, context=context):
if partner.max_followup_id.manual_action:
partner_obj.do_partner_manual_action(cr, uid, [partner.partner_id.id], context=context)
nbmanuals = nbmanuals + 1
key = partner.partner_id.payment_responsible_id.name or _("Anybody")
            if key not in manuals:
manuals[key]= 1
else:
manuals[key] = manuals[key] + 1
if partner.max_followup_id.send_email:
nbunknownmails += partner_obj.do_partner_mail(cr, uid, [partner.partner_id.id], context=context)
nbmails += 1
if partner.max_followup_id.send_letter:
partner_ids_to_print.append(partner.id)
nbprints += 1
message = "%s<I> %s </I>%s" % (_("Follow-up letter of "), partner.partner_id.latest_followup_level_id_without_lit.name, _(" will be sent"))
partner_obj.message_post(cr, uid, [partner.partner_id.id], body=message, context=context)
if nbunknownmails == 0:
resulttext += str(nbmails) + _(" email(s) sent")
else:
resulttext += str(nbmails) + _(" email(s) should have been sent, but ") + str(nbunknownmails) + _(" had unknown email address(es)") + "\n <BR/> "
resulttext += "<BR/>" + str(nbprints) + _(" letter(s) in report") + " \n <BR/>" + str(nbmanuals) + _(" manual action(s) assigned:")
needprinting = False
if nbprints > 0:
needprinting = True
resulttext += "<p align=\"center\">"
for item in manuals:
resulttext = resulttext + "<li>" + item + ":" + str(manuals[item]) + "\n </li>"
resulttext += "</p>"
result = {}
action = partner_obj.do_partner_print(cr, uid, partner_ids_to_print, data, context=context)
result['needprinting'] = needprinting
result['resulttext'] = resulttext
result['action'] = action or {}
return result
def do_update_followup_level(self, cr, uid, to_update, partner_list, date, context=None):
#update the follow-up level on account.move.line
for id in to_update.keys():
if to_update[id]['partner_id'] in partner_list:
self.pool.get('account.move.line').write(cr, uid, [int(id)], {'followup_line_id': to_update[id]['level'],
'followup_date': date})
def clear_manual_actions(self, cr, uid, partner_list, context=None):
        # partner_list is the list of partners to exclude;
        # clear the actions of partners that no longer have due payments
partner_list_ids = [partner.partner_id.id for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_list, context=context)]
ids = self.pool.get('res.partner').search(cr, uid, ['&', ('id', 'not in', partner_list_ids), '|',
('payment_responsible_id', '!=', False),
('payment_next_action_date', '!=', False)], context=context)
partners_to_clear = []
for part in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
if not part.unreconciled_aml_ids:
partners_to_clear.append(part.id)
self.pool.get('res.partner').action_done(cr, uid, partners_to_clear, context=context)
return len(partners_to_clear)
def do_process(self, cr, uid, ids, context=None):
context = dict(context or {})
#Get partners
tmp = self._get_partners_followp(cr, uid, ids, context=context)
partner_list = tmp['partner_ids']
to_update = tmp['to_update']
date = self.browse(cr, uid, ids, context=context)[0].date
data = self.read(cr, uid, ids, context=context)[0]
data['followup_id'] = data['followup_id'][0]
#Update partners
self.do_update_followup_level(cr, uid, to_update, partner_list, date, context=context)
#process the partners (send mails...)
restot_context = context.copy()
restot = self.process_partners(cr, uid, partner_list, data, context=restot_context)
context.update(restot_context)
#clear the manual actions if nothing is due anymore
nbactionscleared = self.clear_manual_actions(cr, uid, partner_list, context=context)
if nbactionscleared > 0:
restot['resulttext'] = restot['resulttext'] + "<li>" + _("%s partners have no credits and as such the action is cleared") %(str(nbactionscleared)) + "</li>"
#return the next action
mod_obj = self.pool.get('ir.model.data')
model_data_ids = mod_obj.search(cr, uid, [('model','=','ir.ui.view'),('name','=','view_account_followup_sending_results')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
context.update({'description': restot['resulttext'], 'needprinting': restot['needprinting'], 'report_data': restot['action']})
return {
'name': _('Send Letters and Emails: Actions Summary'),
'view_type': 'form',
'context': context,
'view_mode': 'tree,form',
'res_model': 'account_followup.sending.results',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def _get_msg(self, cr, uid, context=None):
return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.follow_up_msg
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
'followup_id': _get_followup,
'email_body': "",
'email_subject': _('Invoices Reminder'),
'partner_lang': True,
}
def _get_partners_followp(self, cr, uid, ids, context=None):
data = {}
data = self.browse(cr, uid, ids, context=context)[0]
company_id = data.company_id.id
cr.execute(
"SELECT l.partner_id, l.followup_line_id,l.date_maturity, l.date, l.id "\
"FROM account_move_line AS l "\
"LEFT JOIN account_account AS a "\
"ON (l.account_id=a.id) "\
"WHERE (l.reconcile_id IS NULL) "\
"AND (a.type='receivable') "\
"AND (l.state<>'draft') "\
"AND (l.partner_id is NOT NULL) "\
"AND (a.active) "\
"AND (l.debit > 0) "\
"AND (l.company_id = %s) " \
"AND (l.blocked = False)" \
"ORDER BY l.date", (company_id,)) #l.blocked added to take litigation into account and it is not necessary to change follow-up level of account move lines without debit
move_lines = cr.fetchall()
old = None
fups = {}
fup_id = 'followup_id' in context and context['followup_id'] or data.followup_id.id
date = 'date' in context and context['date'] or data.date
current_date = datetime.date(*time.strptime(date,
'%Y-%m-%d')[:3])
cr.execute(
"SELECT * "\
"FROM account_followup_followup_line "\
"WHERE followup_id=%s "\
"ORDER BY delay", (fup_id,))
#Create dictionary of tuples where first element is the date to compare with the due date and second element is the id of the next level
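        # e.g. two levels with delays [10, 20] and date 2013-05-30 give
        # fups = {None: (2013-05-20, id1), id1: (2013-05-10, id2)}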
for result in cr.dictfetchall():
delay = datetime.timedelta(days=result['delay'])
fups[old] = (current_date - delay, result['id'])
old = result['id']
partner_list = []
to_update = {}
#Fill dictionary of accountmovelines to_update with the partners that need to be updated
for partner_id, followup_line_id, date_maturity,date, id in move_lines:
if not partner_id:
continue
if followup_line_id not in fups:
continue
stat_line_id = partner_id * 10000 + company_id
if date_maturity:
if date_maturity <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
if stat_line_id not in partner_list:
partner_list.append(stat_line_id)
to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
elif date and date <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
if stat_line_id not in partner_list:
partner_list.append(stat_line_id)
to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
return {'partner_ids': partner_list, 'to_update': to_update}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
adit-chandra/tensorflow | third_party/mlir/bindings/python/test/test_py2and3.py | 3 | 19351 | # Copyright 2019 The MLIR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/test_edsc %s | FileCheck %s
"""Python2 and 3 test for the MLIR EDSC Python bindings"""
import google_mlir.bindings.python.pybind as E
import inspect
# Prints `str` prefixed by the current test function name so we can use it in
# FileCheck label directives.
# This is achieved by inspecting the stack and getting the parent name.
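# (inspect.stack()[1] is the caller's frame record; index 3 is its function
# name.)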
def printWithCurrentFunctionName(str):
print(inspect.stack()[1][3])
print(str)
class EdscTest:
def setUp(self):
self.module = E.MLIRModule()
self.boolType = self.module.make_type("i1")
self.i32Type = self.module.make_type("i32")
self.f32Type = self.module.make_type("f32")
self.indexType = self.module.make_index_type()
def testBlockArguments(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
E.constant_index(42)
with E.BlockContext([self.f32Type, self.f32Type]) as b:
b.arg(0) + b.arg(1)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBlockArguments
# CHECK: %{{.*}} = constant 42 : index
# CHECK: ^bb{{.*}}(%{{.*}}: f32, %{{.*}}: f32):
# CHECK: %{{.*}} = addf %{{.*}}, %{{.*}} : f32
def testBlockContext(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
cst = E.constant_index(42)
with E.BlockContext():
cst + cst
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBlockContext
# CHECK: %{{.*}} = constant 42 : index
# CHECK: ^bb
# CHECK: %{{.*}} = "affine.apply"() {map = () -> (84)} : () -> index
def testBlockContextAppend(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
E.constant_index(41)
with E.BlockContext() as b:
blk = b # save block handle for later
E.constant_index(0)
E.constant_index(42)
with E.BlockContext(E.appendTo(blk)):
E.constant_index(1)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBlockContextAppend
# CHECK: %{{.*}} = constant 41 : index
# CHECK: %{{.*}} = constant 42 : index
# CHECK: ^bb
# CHECK: %{{.*}} = constant 0 : index
# CHECK: %{{.*}} = constant 1 : index
def testBlockContextStandalone(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
blk1 = E.BlockContext()
blk2 = E.BlockContext()
with blk1:
E.constant_index(0)
with blk2:
E.constant_index(56)
E.constant_index(57)
E.constant_index(41)
with blk1:
E.constant_index(1)
E.constant_index(42)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBlockContextStandalone
# CHECK: %{{.*}} = constant 41 : index
# CHECK: %{{.*}} = constant 42 : index
# CHECK: ^bb
# CHECK: %{{.*}} = constant 0 : index
# CHECK: %{{.*}} = constant 1 : index
# CHECK: ^bb
# CHECK: %{{.*}} = constant 56 : index
# CHECK: %{{.*}} = constant 57 : index
def testBooleanOps(self):
self.setUp()
with self.module.function_context(
"booleans", [self.boolType for _ in range(4)], []) as fun:
i, j, k, l = (fun.arg(x) for x in range(4))
stmt1 = (i < j) & (j >= k)
stmt2 = ~(stmt1 | (k == l))
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBooleanOps
# CHECK: %{{.*}} = cmpi "slt", %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = cmpi "sge", %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = muli %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = cmpi "eq", %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = constant 1 : i1
# CHECK: %{{.*}} = subi %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = constant 1 : i1
# CHECK: %{{.*}} = subi %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = muli %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = constant 1 : i1
# CHECK: %{{.*}} = subi %{{.*}}, %{{.*}} : i1
# CHECK: %{{.*}} = constant 1 : i1
# CHECK: %{{.*}} = subi %{{.*}}, %{{.*}} : i1
def testBr(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
with E.BlockContext() as b:
blk = b
E.ret()
E.br(blk)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBr
# CHECK: br ^bb
# CHECK: ^bb
# CHECK: return
def testBrArgs(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
# Create an infinite loop.
with E.BlockContext([self.indexType, self.indexType]) as b:
E.br(b, [b.arg(1), b.arg(0)])
E.br(b, [E.constant_index(0), E.constant_index(1)])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBrArgs
# CHECK: %{{.*}} = constant 0 : index
# CHECK: %{{.*}} = constant 1 : index
# CHECK: br ^bb{{.*}}(%{{.*}}, %{{.*}} : index, index)
# CHECK: ^bb{{.*}}(%{{.*}}: index, %{{.*}}: index):
# CHECK: br ^bb{{.*}}(%{{.*}}, %{{.*}} : index, index)
def testBrDeclaration(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
blk = E.BlockContext()
E.br(blk.handle())
with blk:
E.ret()
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testBrDeclaration
# CHECK: br ^bb
# CHECK: ^bb
# CHECK: return
def testCallOp(self):
self.setUp()
callee = self.module.declare_function("sqrtf", [self.f32Type],
[self.f32Type])
with self.module.function_context("call", [self.f32Type], []) as fun:
funCst = E.constant_function(callee)
funCst([fun.arg(0)]) + E.constant_float(42., self.f32Type)
printWithCurrentFunctionName(str(self.module))
# CHECK-LABEL: testCallOp
# CHECK: func @sqrtf(f32) -> f32
# CHECK: %{{.*}} = constant @sqrtf : (f32) -> f32
# CHECK: %{{.*}} = call_indirect %{{.*}}(%{{.*}}) : (f32) -> f32
def testCondBr(self):
self.setUp()
with self.module.function_context("foo", [self.boolType], []) as fun:
with E.BlockContext() as blk1:
E.ret([])
with E.BlockContext([self.indexType]) as blk2:
E.ret([])
cst = E.constant_index(0)
E.cond_br(fun.arg(0), blk1, [], blk2, [cst])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testCondBr
# CHECK: cond_br %{{.*}}, ^bb{{.*}}, ^bb{{.*}}(%{{.*}} : index)
def testConstants(self):
self.setUp()
with self.module.function_context("constants", [], []) as fun:
E.constant_float(1.23, self.module.make_type("bf16"))
E.constant_float(1.23, self.module.make_type("f16"))
E.constant_float(1.23, self.module.make_type("f32"))
E.constant_float(1.23, self.module.make_type("f64"))
E.constant_int(1, 1)
E.constant_int(123, 8)
E.constant_int(123, 16)
E.constant_int(123, 32)
E.constant_int(123, 64)
E.constant_index(123)
E.constant_function(fun)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testConstants
# CHECK: constant 1.230000e+00 : bf16
# CHECK: constant 1.230470e+00 : f16
# CHECK: constant 1.230000e+00 : f32
# CHECK: constant 1.230000e+00 : f64
# CHECK: constant 1 : i1
# CHECK: constant 123 : i8
# CHECK: constant 123 : i16
# CHECK: constant 123 : i32
# CHECK: constant 123 : index
# CHECK: constant @constants : () -> ()
def testCustom(self):
self.setUp()
with self.module.function_context("custom", [self.indexType, self.f32Type],
[]) as fun:
E.op("foo", [fun.arg(0)], [self.f32Type]) + fun.arg(1)
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testCustom
# CHECK: %{{.*}} = "foo"(%{{.*}}) : (index) -> f32
# CHECK: %{{.*}} = addf %{{.*}}, %{{.*}} : f32
# Create 'addi' using the generic Op interface. We need an operation known
# to the execution engine so that the engine can compile it.
def testCustomOpCompilation(self):
self.setUp()
with self.module.function_context("adder", [self.i32Type], []) as f:
c1 = E.op(
"std.constant", [], [self.i32Type],
value=self.module.integerAttr(self.i32Type, 42))
E.op("std.addi", [c1, f.arg(0)], [self.i32Type])
E.ret([])
self.module.compile()
printWithCurrentFunctionName(str(self.module.get_engine_address() == 0))
# CHECK-LABEL: testCustomOpCompilation
# CHECK: False
def testDivisions(self):
self.setUp()
with self.module.function_context(
"division", [self.indexType, self.i32Type, self.i32Type], []) as fun:
# indices only support floor division
fun.arg(0) // E.constant_index(42)
# regular values only support regular division
fun.arg(1) / fun.arg(2)
printWithCurrentFunctionName(str(self.module))
# CHECK-LABEL: testDivisions
# CHECK: floordiv 42
# CHECK: divis %{{.*}}, %{{.*}} : i32
def testFunctionArgs(self):
self.setUp()
with self.module.function_context("foo", [self.f32Type, self.f32Type],
[self.indexType]) as fun:
pass
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testFunctionArgs
# CHECK: func @foo(%{{.*}}: f32, %{{.*}}: f32) -> index
def testFunctionContext(self):
self.setUp()
with self.module.function_context("foo", [], []):
pass
printWithCurrentFunctionName(self.module.get_function("foo"))
# CHECK-LABEL: testFunctionContext
# CHECK: func @foo() {
def testFunctionDeclaration(self):
self.setUp()
boolAttr = self.module.boolAttr(True)
t = self.module.make_memref_type(self.f32Type, [10])
t_llvm_noalias = t({"llvm.noalias": boolAttr})
t_readonly = t({"readonly": boolAttr})
f = self.module.declare_function("foo", [t, t_llvm_noalias, t_readonly], [])
printWithCurrentFunctionName(str(self.module))
# CHECK-LABEL: testFunctionDeclaration
# CHECK: func @foo(memref<10xf32>, memref<10xf32> {llvm.noalias = true}, memref<10xf32> {readonly = true})
def testFunctionMultiple(self):
self.setUp()
with self.module.function_context("foo", [], []):
pass
with self.module.function_context("foo", [], []):
E.constant_index(0)
printWithCurrentFunctionName(str(self.module))
# CHECK-LABEL: testFunctionMultiple
# CHECK: func @foo()
# CHECK: func @foo_0()
# CHECK: %{{.*}} = constant 0 : index
def testIndexCast(self):
self.setUp()
with self.module.function_context("testIndexCast", [], []):
index = E.constant_index(0)
E.index_cast(index, self.i32Type)
printWithCurrentFunctionName(str(self.module))
# CHECK-LABEL: testIndexCast
# CHECK: index_cast %{{.*}} : index to i32
def testIndexedValue(self):
self.setUp()
memrefType = self.module.make_memref_type(self.f32Type, [10, 42])
with self.module.function_context("indexed", [memrefType],
[memrefType]) as fun:
A = E.IndexedValue(fun.arg(0))
cst = E.constant_float(1., self.f32Type)
with E.LoopNestContext(
[E.constant_index(0), E.constant_index(0)],
[E.constant_index(10), E.constant_index(42)], [1, 1]) as (i, j):
A.store([i, j], A.load([i, j]) + cst)
E.ret([fun.arg(0)])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testIndexedValue
# CHECK: "affine.for"()
# CHECK: "affine.for"()
# CHECK: "affine.load"
# CHECK-SAME: memref<10x42xf32>
# CHECK: %{{.*}} = addf %{{.*}}, %{{.*}} : f32
# CHECK: "affine.store"
# CHECK-SAME: memref<10x42xf32>
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (42)}
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (10)}
def testLoopContext(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
lhs = E.constant_index(0)
rhs = E.constant_index(42)
with E.LoopContext(lhs, rhs, 1) as i:
lhs + rhs + i
with E.LoopContext(rhs, rhs + rhs, 2) as j:
x = i + j
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testLoopContext
# CHECK: "affine.for"() (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: "affine.for"(%{{.*}}, %{{.*}}) (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: "affine.apply"(%{{.*}}, %{{.*}}) {map = (d0, d1) -> (d0 + d1)} : (index, index) -> index
# CHECK: {lower_bound = (d0) -> (d0), step = 2 : index, upper_bound = (d0) -> (d0)} : (index, index) -> ()
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (42)}
def testLoopNestContext(self):
self.setUp()
with self.module.function_context("foo", [], []) as fun:
lbs = [E.constant_index(i) for i in range(4)]
ubs = [E.constant_index(10 * i + 5) for i in range(4)]
with E.LoopNestContext(lbs, ubs, [1, 3, 5, 7]) as (i, j, k, l):
i + j + k + l
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testLoopNestContext
# CHECK: "affine.for"() (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: "affine.for"() (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: "affine.for"() (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: "affine.for"() (
# CHECK: ^bb{{.*}}(%{{.*}}: index):
# CHECK: %{{.*}} = "affine.apply"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {map = (d0, d1, d2, d3) -> (d0 + d1 + d2 + d3)} : (index, index, index, index) -> index
def testMLIRBooleanCompilation(self):
self.setUp()
m = self.module.make_memref_type(self.boolType, [10]) # i1 tensor
with self.module.function_context("mkbooltensor", [m, m], []) as f:
input = E.IndexedValue(f.arg(0))
output = E.IndexedValue(f.arg(1))
zero = E.constant_index(0)
ten = E.constant_index(10)
with E.LoopNestContext([zero] * 3, [ten] * 3, [1] * 3) as (i, j, k):
b1 = (i < j) & (j < k)
b2 = ~b1
b3 = b2 | (k < j)
output.store([i], input.load([i]) & b3)
E.ret([])
self.module.compile()
printWithCurrentFunctionName(str(self.module.get_engine_address() == 0))
# CHECK-LABEL: testMLIRBooleanCompilation
# CHECK: False
def testMLIRFunctionCreation(self):
self.setUp()
module = E.MLIRModule()
t = module.make_type("f32")
m = module.make_memref_type(t, [3, 4, -1, 5])
printWithCurrentFunctionName(str(t))
print(str(m))
print(str(module.make_function("copy", [m, m], [])))
print(str(module.make_function("sqrtf", [t], [t])))
# CHECK-LABEL: testMLIRFunctionCreation
# CHECK: f32
# CHECK: memref<3x4x?x5xf32>
# CHECK: func @copy(%{{.*}}: memref<3x4x?x5xf32>, %{{.*}}: memref<3x4x?x5xf32>) {
# CHECK: func @sqrtf(%{{.*}}: f32) -> f32
def testMLIRScalarTypes(self):
self.setUp()
module = E.MLIRModule()
printWithCurrentFunctionName(str(module.make_type("bf16")))
print(str(module.make_type("f16")))
print(str(module.make_type("f32")))
print(str(module.make_type("f64")))
print(str(module.make_type("i1")))
print(str(module.make_type("i8")))
print(str(module.make_type("i32")))
print(str(module.make_type("i123")))
print(str(module.make_type("index")))
# CHECK-LABEL: testMLIRScalarTypes
# CHECK: bf16
# CHECK: f16
# CHECK: f32
# CHECK: f64
# CHECK: i1
# CHECK: i8
# CHECK: i32
# CHECK: i123
# CHECK: index
def testMatrixMultiply(self):
self.setUp()
memrefType = self.module.make_memref_type(self.f32Type, [32, 32])
with self.module.function_context(
"matmul", [memrefType, memrefType, memrefType], []) as fun:
A = E.IndexedValue(fun.arg(0))
B = E.IndexedValue(fun.arg(1))
C = E.IndexedValue(fun.arg(2))
c0 = E.constant_index(0)
c32 = E.constant_index(32)
with E.LoopNestContext([c0, c0, c0], [c32, c32, c32], [1, 1, 1]) as (i, j,
k):
C.store([i, j], A.load([i, k]) * B.load([k, j]))
E.ret([])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testMatrixMultiply
# CHECK: "affine.for"()
# CHECK: "affine.for"()
# CHECK: "affine.for"()
# CHECK-DAG: %{{.*}} = "affine.load"
# CHECK-DAG: %{{.*}} = "affine.load"
# CHECK: %{{.*}} = mulf %{{.*}}, %{{.*}} : f32
# CHECK: "affine.store"
# CHECK-SAME: memref<32x32xf32>
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (32)} : () -> ()
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (32)} : () -> ()
# CHECK: {lower_bound = () -> (0), step = 1 : index, upper_bound = () -> (32)} : () -> ()
def testRet(self):
self.setUp()
with self.module.function_context("foo", [],
[self.indexType, self.indexType]) as fun:
c42 = E.constant_index(42)
c0 = E.constant_index(0)
E.ret([c42, c0])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testRet
# CHECK: %{{.*}} = constant 42 : index
# CHECK: %{{.*}} = constant 0 : index
# CHECK: return %{{.*}}, %{{.*}} : index, index
def testSelectOp(self):
self.setUp()
with self.module.function_context("foo", [self.boolType],
[self.i32Type]) as fun:
a = E.constant_int(42, 32)
b = E.constant_int(0, 32)
E.ret([E.select(fun.arg(0), a, b)])
printWithCurrentFunctionName(str(fun))
# CHECK-LABEL: testSelectOp
# CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : i32
# Before Python 3.6 the class __dict__ does not preserve the order of method
# declaration, so collect the tests via dir() and sort them by name.
def runTests():
def isTest(attr):
return inspect.ismethod(attr) and "EdscTest.setUp " not in str(attr)
edscTest = EdscTest()
tests = sorted(filter(isTest,
(getattr(edscTest, attr) for attr in dir(edscTest))),
key = lambda x : str(x))
for test in tests:
test()
if __name__ == '__main__':
runTests()
| apache-2.0 |
citrix-openstack-build/nova | nova/tests/servicegroup/test_zk_driver.py | 42 | 2605 | # Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the ZooKeeper driver for servicegroup.
You need to install ZooKeeper locally, along with its related dependencies,
to run the test. It's unclear how to install the python-zookeeper lib
in a venv, so you might have to run the test without it.
To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""
import eventlet
from nova import servicegroup
from nova import test
class ZKServiceGroupTestCase(test.NoDBTestCase):
def setUp(self):
super(ZKServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
from nova.servicegroup.drivers import zk
self.flags(servicegroup_driver='zk')
self.flags(address='localhost:2181', group="zookeeper")
try:
zk.ZooKeeperDriver()
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
self.servicegroup_api.join(service_id['host'], service_id['topic'])
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
self.servicegroup_api.leave(service_id['host'], service_id['topic'])
# make sure zookeeper is updated and watcher is triggered
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
def test_stop(self):
self.servicegroup_api = servicegroup.API()
service_id = {'topic': 'unittest', 'host': 'serviceA'}
pulse = self.servicegroup_api.join(service_id['host'],
service_id['topic'], None)
self.assertTrue(self.servicegroup_api.service_is_up(service_id))
pulse.stop()
eventlet.sleep(1)
self.assertFalse(self.servicegroup_api.service_is_up(service_id))
| apache-2.0 |
unusedPhD/amoco | amoco/arch/arm/v7/asm.py | 6 | 25084 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2006-2011 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
from .env import *
#utilities:
#----------
from .utils import *
from amoco.cas.utils import *
from amoco.arch.core import InstructionError
from amoco.logger import Log
logger = Log(__name__)
#------------------------------------------------------------------------------
# low level functions :
def _switch_isetstate():
_s = internals['isetstate']
internals['isetstate'] = 0 if _s==1 else 1
logger.info('switch to %s instructions'%({0:'ARM',1:'Thumb'}[internals['isetstate']]))
def __check_state(i,fmap):
address = fmap(pc)
if address.bit(0)==1:
internals['isetstate'] = 1
elif address.bit(1)==0:
internals['isetstate'] = 0
else:
if address._is_cst:
raise InstructionError(i)
else:
logger.verbose('impossible to check isetstate (ARM/Thumb) until pc is cst')
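# __pre: common prologue of the i_xxx semantics below; it advances pc past the
# instruction, evaluates the condition expression, and returns the operands
# (with pc-relative operands adjusted by the instruction length).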
def __pre(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = fmap(CONDITION[i.cond][1])
dest,op1 = i.operands[0:2]
if op1 is pc: op1=op1+i.length
if len(i.operands)==3:
op2 = i.operands[2]
if op2 is pc: op2=op2+i.length
return cond,dest,op1,op2
if len(i.operands)>3:
ops = tuple(i.operands[3:])
return (cond,dest,op1,op2)+ops
return cond,dest,op1
def __setflags(fmap,cond,cout,result,overflow=None):
if cout is None: cout = fmap(C)
fmap[C] = tst(cond,cout,fmap(C))
fmap[Z] = tst(cond,(result==0),fmap(Z))
fmap[N] = tst(cond,(result<0),fmap(N))
if overflow is not None:
fmap[V] = tst(cond,overflow,fmap(V))
# i_xxx is the translation of UAL (ARM/Thumb) instruction xxx.
#------------------------------------------------------------------------------
# Branch instructions (A4.3, pA4-7)
def i_B(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = CONDITION[i.cond][1]
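    # reading pc yields addr+8 in ARM state and addr+4 in Thumb state; pc was
    # already advanced by i.length above, so add i.length once more, except
    # for 32-bit Thumb encodings where pcoffset is reset to 0 below.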
pcoffset = i.length
if internals['isetstate'] and pcoffset==4: pcoffset=0
fmap[pc] = fmap(tst(cond,pc+i.imm32+pcoffset,pc))
__check_state(i,fmap)
def i_CBNZ(i,fmap):
fmap[pc] = fmap(pc+i.length)
op1,op2 = i.operands
fmap[pc] = fmap(tst(i.n!=0,pc+i.imm32+i.length,pc))
__check_state(i,fmap)
def i_CBZ(i,fmap):
fmap[pc] = fmap(pc+i.length)
op1,op2 = i.operands
fmap[pc] = fmap(tst(i.n==0,pc+i.imm32+i.length,pc))
__check_state(i,fmap)
def i_BL(i,fmap):
fmap[pc] = fmap(pc+i.length)
fmap[lr] = fmap(pc)
offset = i.operands[0]
cond = CONDITION[i.cond][1]
pcoffset = i.length
if internals['isetstate']==1 and pcoffset==4: pcoffset=0
fmap[pc] = fmap(tst(cond,pc+offset+pcoffset,pc))
__check_state(i,fmap)
def i_BLX(i,fmap):
fmap[pc] = fmap(pc+i.length)
fmap[lr] = fmap(pc)
src = i.operands[0]
cond = CONDITION[i.cond][1]
fmap[pc] = fmap(tst(cond,src,pc))
__check_state(i,fmap)
def i_BX(i,fmap):
fmap[pc] = fmap(pc+i.length)
src = i.operands[0]
cond = CONDITION[i.cond][1]
fmap[pc] = fmap(tst(cond,src,pc))
_switch_isetstate()
def i_BXJ(i,fmap):
fmap[pc] = fmap(pc+i.length)
fmap[lr] = fmap(pc)
src = i.operands[0]
cond = CONDITION[i.cond][1]
fmap[pc] = fmap(tst(cond,src,pc))
internals['isetstate'] = 2
logger.error('switch to Jazelle instructions (unsupported)')
# Data processing instructions (A4.4)
# standard (4.4.1):
def i_ADC(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(op1),fmap(op2),fmap(C))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_ADD(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(op1),fmap(op2))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_ADR(i,fmap):
fmap[pc] = fmap(pc+i.length)
if i.add:
result = fmap(pc&0xFFFFFFFC)+i.imm32+i.length
else:
result = fmap(pc&0xFFFFFFFC)-i.imm32+i.length
cond = fmap(CONDITION[i.cond][1])
fmap[i.d] = tst(cond,result,fmap(i.d))
def i_AND(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result = fmap(op1 & op2)
cout = fmap(op2.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_BIC(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result = fmap(op1 & (~op2))
cout = fmap(op2.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_CMN(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(dest),fmap(op1))
__setflags(fmap,cond,cout,result,overflow)
def i_CMP(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result,cout,overflow = SubWithBorrow(fmap(dest),fmap(op1))
__setflags(fmap,cond,cout,result,overflow)
def i_EOR(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result = fmap(op1 ^ op2)
cout = fmap(op2.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_MOV(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result = fmap(op1)
cout = fmap(op1.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_MOVW(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result = fmap(op1)
cout = fmap(op1.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_MVN(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result = fmap(~op1)
cout = fmap(op1.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_ORN(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result = fmap(op1 | ~op2)
cout = fmap(op2.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_ORR(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result = fmap(op1 | op2)
cout = fmap(op2.bit(31))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_RSB(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(~op1),fmap(op2),bit1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_RSC(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(~op1),fmap(op2),fmap(C))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_SBC(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(op1),fmap(~op2),fmap(C))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_SUB(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
result,cout,overflow = AddWithCarry(fmap(op1),fmap(~op2),bit1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result,overflow)
def i_TEQ(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result = fmap(dest ^ op1)
cout = fmap(op1.bit(31))
__setflags(fmap,cond,cout,result)
def i_TST(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result = fmap(dest & op1)
cout = fmap(op1.bit(31))
__setflags(fmap,cond,cout,result)
# shifts (4.4.2)
def i_ASR(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
shift = fmap(op2)
if shift._is_cst:
result,cout = ASR_C(fmap(op1),shift.value)
else:
result,cout = fmap(op1>>op2), top(1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_LSL(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
shift = fmap(op2)
if shift._is_cst:
result,cout = LSL_C(fmap(op1),shift.value)
else:
result,cout = fmap(op1<<op2), top(1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_LSR(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
shift = fmap(op2)
if shift._is_cst:
result,cout = LSR_C(fmap(op1),shift.value)
else:
result,cout = fmap(op1>>op2), top(1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_ROR(i,fmap):
cond,dest,op1,op2 = __pre(i,fmap)
shift = fmap(op2)
if shift._is_cst:
result,cout = ROR_C(fmap(op1),shift.value)
else:
        result,cout = fmap(ror(op1,op2)), top(1)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
def i_RRX(i,fmap):
cond,dest,op1 = __pre(i,fmap)
result,cout = RRX_C(fmap(op1),fmap(C))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
__setflags(fmap,cond,cout,result)
# multiply (4.4.3)
# general:
def i_MLA(i,fmap):
cond,dest,op1,op2,addend = __pre(i,fmap)
result = fmap((op1*op2)+addend)
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
elif i.setflags:
fmap[Z] = tst(cond,(result==0),fmap(Z))
fmap[N] = tst(cond,(result<0),fmap(N))
def i_MLS(i,fmap):
cond,dest,op1,op2,addend = __pre(i,fmap)
result = fmap(addend-(op1*op2))
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
__check_state(i,fmap)
def i_MUL(i,fmap):
    cond,dest,op1,op2 = __pre(i,fmap)
    result = fmap(op1*op2)
    fmap[dest] = tst(cond,result,fmap(dest))
    if i.setflags:
        # MULS updates N and Z (same as MLA above)
        fmap[Z] = tst(cond,(result==0),fmap(Z))
        fmap[N] = tst(cond,(result<0),fmap(N))
# signed:
# SMLABB, SMLABT, SMLATB, SMLATT
def i_SMLABB(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
op1 = Rn[0:16]
op2 = Rm[0:16]
result = fmap((op1**op2) + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLABT(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
op1 = Rn[0:16]
op2 = Rm[16:32]
result = fmap((op1**op2) + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLATT(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
op1 = Rn[16:32]
op2 = Rm[16:32]
result = fmap((op1**op2) + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLATB(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
op1 = Rn[16:32]
op2 = Rm[0:16]
result = fmap((op1**op2) + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLAD(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
p1 = Rn[0:16] ** Rm[0:16]
p2 = Rn[16:32] ** Rm[16:32]
result = fmap(p1 + p2 + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLADX(i,fmap):
cond,dest,Rn,Rm,Ra = __pre(i,fmap)
p1 = Rn[0:16] ** Rm[16:32]
p2 = Rn[16:32] ** Rm[0:16]
result = fmap(p1 + p2 + Ra)
fmap[dest] = tst(cond,result,fmap(dest))
overflow = top(1)
fmap[V] = tst(cond,overflow,fmap(V))
def i_SMLAL(i,fmap):
cond,RdLo,RdHi,Rn,Rm = __pre(i,fmap)
result = fmap(Rn**Rm + composer([RdLo,RdHi]))
fmap[RdLo] = tst(cond,result[0:32],fmap(RdLo))
fmap[RdHi] = tst(cond,result[32:64],fmap(RdHi))
if i.setflags:
fmap[Z] = tst(cond,(result==0),fmap(Z))
fmap[N] = tst(cond,result.bit(63),fmap(N))
def i_SMLALBB(i,fmap):
cond,RdLo,RdHi,Rn,Rm = __pre(i,fmap)
op1 = Rn[0:16]
op2 = Rm[0:16]
result = fmap((op1**op2).signextend(64) + composer([RdLo,RdHi]))
fmap[RdLo] = tst(cond,result[0:32],fmap(RdLo))
fmap[RdHi] = tst(cond,result[32:64],fmap(RdHi))
def i_SMLALBT(i,fmap):
cond,RdLo,RdHi,Rn,Rm = __pre(i,fmap)
op1 = Rn[0:16]
op2 = Rm[16:32]
result = fmap((op1**op2).signextend(64) + composer([RdLo,RdHi]))
fmap[RdLo] = tst(cond,result[0:32],fmap(RdLo))
fmap[RdHi] = tst(cond,result[32:64],fmap(RdHi))
def i_SMLALTT(i,fmap):
cond,RdLo,RdHi,Rn,Rm = __pre(i,fmap)
op1 = Rn[16:32]
op2 = Rm[16:32]
result = fmap((op1**op2).signextend(64) + composer([RdLo,RdHi]))
fmap[RdLo] = tst(cond,result[0:32],fmap(RdLo))
fmap[RdHi] = tst(cond,result[32:64],fmap(RdHi))
def i_SMLALTB(i,fmap):
cond,RdLo,RdHi,Rn,Rm = __pre(i,fmap)
op1 = Rn[16:32]
op2 = Rm[0:16]
result = fmap((op1**op2).signextend(64) + composer([RdLo,RdHi]))
fmap[RdLo] = tst(cond,result[0:32],fmap(RdLo))
fmap[RdHi] = tst(cond,result[32:64],fmap(RdHi))
# SMLALD
# SMLAWB, SMLAWT
# SMLSD
# SMLSLD
# SMMLA
# SMMLS
# SMMUL
# SMUAD
# SMULB, SMULBT, SMULTB, SMULTT
# SMULL
# SMULWB, SMULWT
# SMUSD
# saturation (4.4.4)
# SSAT
# SSAT16
# USAT
# USAT16
# packing/unpacking (4.4.5)
# PKH
# SXTAB
# SXTAB16
# SXTAH
# SXTB
# SXTB16
# SXTH
# UXTAB
# UXTAB16
# UXTAH
# UXTB
# UXTB16
# UXTH
# miscellaneous (4.4.6)
def i_BFC(i,fmap):
cond,dest,lsb,size = __pre(i,fmap)
src = fmap(dest)
result = composer([src[0:lsb],cst(0,size),src[lsb+size:src.size]])
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
raise InstructionError(i)
def i_BFI(i,fmap):
cond,dest,src,lsb,size = __pre(i,fmap)
src = fmap(src)
result = composer([dest[0:lsb],src[lsb,lsb+size],dest[lsb+size:dest.size]])
fmap[dest] = tst(cond,result,fmap(dest))
if dest is pc:
raise InstructionError(i)
def i_CLZ(i,fmap):
cond,dest,src = __pre(i,fmap)
result = fmap(src)
if result._is_cst:
bits = [(result.value>>b)&1 for b in range(result.size)]
# list objects have no find(); reverse to MSB-first order so that the index of
# the first set bit is the count of leading zeros (size when no bit is set)
result = cst(bits[::-1].index(1) if 1 in bits else result.size, dest.size)
else:
result = top(dest.size)
fmap[dest] = tst(cond,result,fmap(dest))
# MOVT
# RBIT
# REV
# REV16
# REVSH
# SBFX
# SEL
# UBFX
# USAD8
# USADA8
# parallel addition/subtraction (4.4.7)
# ADD16
# ASX
# SAX
# SUB16
# ADD8
# SUB8
# divide (4.4.8)
# SDIV
# UDIV
# apsr access (A4.5)
# CPS
# MRS
# MSR
# load/store (A4.6)
def i_LDR(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(mem(adr,32))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_LDREX(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(mem(adr,32))
fmap[dest] = tst(cond,result,fmap(dest))
# exclusive monitor not supported
def i_LDRB(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(mem(adr,8)).zeroextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_LDREXB(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(mem(adr,8)).zeroextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
# exclusive monitor not supported
def i_LDRH(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(mem(adr,16)).zeroextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_LDREXH(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(mem(adr,16)).zeroextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
# exclusive monitor not supported
def i_LDRSB(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(mem(adr,8)).signextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_LDRSH(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(mem(adr,16)).signextend(32)
fmap[dest] = tst(cond,result,fmap(dest))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_LDRD(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = fmap(CONDITION[i.cond][1])
dst1,dst2,src,sht = i.operands
if src is pc: src = src+i.length
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
res1 = fmap(mem(adr,32))
res2 = fmap(mem(adr+4,32))
fmap[dst1] = tst(cond,res1,fmap(dst1))
fmap[dst2] = tst(cond,res2,fmap(dst2))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
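# LDRD (and STRD further down) cannot reuse __pre because they take four
# operands, so the pc update, condition lookup and pc-relative adjustment are
# inlined at the top of the handler instead.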
def i_LDRT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
result = fmap(mem(adr,32))
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_LDRBT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
result = fmap(mem(adr,8)).zeroextend(32)
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_LDRHT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
result = fmap(mem(adr,16)).zeroextend(32)
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_LDRSBT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
result = fmap(mem(adr,8)).signextend(32)
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_LDRSHT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
result = fmap(mem(adr,16)).signextend(32)
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[dest] = tst(cond,result,fmap(dest))
def i_STR(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(dest)
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[mem(adr,32)] = tst(cond,result,fmap(mem(adr,32)))
def i_STREX(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(dest)
fmap[mem(adr,32)] = tst(cond,result,fmap(mem(adr,32)))
# exclusive monitor not supported
def i_STRB(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(dest[0:8])
fmap[mem(adr,8)] = tst(cond,result,fmap(mem(adr,8)))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_STREXB(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(dest[0:8])
fmap[mem(adr,8)] = tst(cond,result,fmap(mem(adr,8)))
# exclusive monitor not supported
def i_STRH(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
result = fmap(dest[0:16])
fmap[mem(adr,16)] = tst(cond,result,fmap(mem(adr,16)))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_STREXH(i,fmap):
cond,dest,src,imm = __pre(i,fmap)
off_addr = (src+imm)
adr = off_addr
result = fmap(dest[0:16])
fmap[mem(adr,16)] = tst(cond,result,fmap(mem(adr,16)))
# exclusive monitor not supported
def i_STRD(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = fmap(CONDITION[i.cond][1])
dst1,dst2,src,sht = i.operands
if src is pc: src = src+i.length
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.index else src
adr1 = mem(adr,32)
adr2 = mem(adr+4,32)
res1 = fmap(dst1)
res2 = fmap(dst2)
fmap[adr1] = tst(cond,res1,fmap(adr1))
fmap[adr2] = tst(cond,res2,fmap(adr2))
if i.wback:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
def i_STRT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
adr1 = mem(adr,32)
result = fmap(dest)
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[adr1] = tst(cond,result,fmap(adr1))
def i_STRBT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
adr1 = mem(adr,8)
result = fmap(dest[0:8])
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[adr1] = tst(cond,result,fmap(adr1))
def i_STRHT(i,fmap):
cond,dest,src,sht = __pre(i,fmap)
off_addr = (src+sht) if i.add else (src-sht)
adr = off_addr if i.postindex else src
adr1 = mem(adr,16)
result = fmap(dest[0:16])
if i.postindex:
fmap[src] = tst(cond,fmap(off_addr),fmap(src))
fmap[adr1] = tst(cond,result,fmap(adr1))
# load/store multiple (A4.7)
# LDM, LDMIA, LDMFD
# LDMDA, LDMFA
# LDMDB, LDMEA
# LDMIB, LDMED
def i_POP(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = CONDITION[i.cond][1]
regs = i.operands[0]
adr = sp
for _r in regs:
fmap[_r] = fmap(tst(cond,mem(adr,32),_r))
adr = adr+4
fmap[sp] = fmap(tst(cond,sp+(4*len(regs)),sp))
def i_PUSH(i,fmap):
fmap[pc] = fmap(pc+i.length)
cond = CONDITION[i.cond][1]
regs = i.operands[0]
adr = sp-(4*len(regs))
for _r in regs:
if _r is pc: _r = _r+i.length
fmap[mem(adr,32)] = fmap(tst(cond,_r,mem(adr,32)))
adr = adr+4
fmap[sp] = fmap(tst(cond,sp-(4*len(regs)),sp))
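# Both handlers model a full-descending stack: POP reads ascending from the
# current sp and then adds 4*len(regs), while PUSH first computes the reserved
# base sp-4*len(regs), stores ascending into it, and lowers sp by the same amount.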
# STM, STMIA, STMEA
# STMDA, STMED
# STMDB, STMFD
# STMIB, STMFA
# miscellaneous (A4.8)
def i_CLREX(i,fmap):
fmap[pc] = fmap(pc+i.length)
# exclusive monitor not supported
def i_DBG(i,fmap):
fmap[pc] = fmap(pc+i.length)
# debug hint
def i_DMB(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_DSB(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_ISB(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_IT(i,fmap):
assert internals['isetstate']==1
fmap[pc] = fmap(pc+i.length)
internals['itstate'] = 1
def i_NOP(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_WFE(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_WFI(i,fmap):
fmap[pc] = fmap(pc+i.length)
def i_YIELD(i,fmap):
fmap[pc] = fmap(pc+i.length)
# pre-load data hint
def i_PLD(i,fmap):
fmap[pc] = fmap(pc+i.length)
# pre-load data wide hint
def i_PLDW(i,fmap):
fmap[pc] = fmap(pc+i.length)
# pre-load instruction hint
def i_PLI(i,fmap):
fmap[pc] = fmap(pc+i.length)
# change endianess
def i_SETEND(i,fmap):
fmap[pc] = fmap(pc+i.length)
internals['endianstate'] = 1 if i.set_bigend else 0
exp.setendian(-1 if i.set_bigend else +1)
# event hint
def i_SEV(i,fmap):
fmap[pc] = fmap(pc+i.length)
# supervisor call
def i_SVC(i,fmap):
fmap[pc] = fmap(pc+i.length)
logger.info('call to supervisor is unsupported')
def i_SWP(i,fmap):
fmap[pc] = fmap(pc+i.length)
Rt,Rt2,Rn = i.operands
data = fmap(mem(Rn,32))
fmap[mem(Rn,32)] = fmap(Rt2)
fmap[Rt] = data
def i_SWPB(i,fmap):
fmap[pc] = fmap(pc+i.length)
Rt,Rt2,Rn = i.operands
data = fmap(mem(Rn,8))
fmap[mem(Rn,8)] = fmap(Rt2)[0:8]
fmap[Rt] = data.zeroextend(32)
def i_ENTERX(i,fmap):
fmap[pc] = fmap(pc+i.length)
internals['isetstate'] = 3
def i_LEAVEX(i,fmap):
fmap[pc] = fmap(pc+i.length)
internals['isetstate'] = 1
def i_SMC(i,fmap):
raise InstructionError(i)
# coprocessor (A4.9)
# MCR, MCR2
# MCRR, MCRR2
# MRC, MRC2
# MRRC, MRRC2
# LDC, LDC2
# STC, STC2
# SIMD and VFP (A4.10)
# NOT IMPLEMENTED
| gpl-2.0 |
402231444/c2g16 | w2/static/Brython2.0.0-20140209-164925/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
| gpl-2.0 |
rajashreer7/autotest-client-tests | linux-tools/pixman/pixman.py | 4 | 1243 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class pixman(test.test):
"""
Autotest module for testing basic functionality
of pixman
@author Kingsuk Deb, kingsdeb@linux.vnet.ibm.com
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./pixman.sh'], cwd="%s/pixman" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| gpl-2.0 |
omarocegueda/dipy | doc/examples/viz_ui.py | 1 | 3092 | import numpy as np
from dipy.data import read_viz_icons
# Conditional import machinery for vtk.
from dipy.utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk.
from dipy.viz import ui, window
vtk, have_vtk, setup_module = optional_package('vtk')
if have_vtk:
vtkInteractorStyleUser = vtk.vtkInteractorStyleUser
version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
major_version = vtk.vtkVersion.GetVTKMajorVersion()
else:
vtkInteractorStyleUser = object
numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')
# Cube Actors
def cube_maker(color=None, size=(0.2, 0.2, 0.2), center=None):
cube = vtk.vtkCubeSource()
cube.SetXLength(size[0])
cube.SetYLength(size[1])
cube.SetZLength(size[2])
if center is not None:
cube.SetCenter(*center)
cube_mapper = vtk.vtkPolyDataMapper()
cube_mapper.SetInputConnection(cube.GetOutputPort())
cube_actor = vtk.vtkActor()
cube_actor.SetMapper(cube_mapper)
if color is not None:
cube_actor.GetProperty().SetColor(color)
return cube_actor
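# cube_maker assembles the standard VTK pipeline: vtkCubeSource generates the
# geometry, vtkPolyDataMapper converts it to renderable polydata, and vtkActor
# binds the mapper together with display properties such as the color.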
cube_actor_1 = cube_maker((1, 0, 0), (50, 50, 50), center=(0, 0, 0))
cube_actor_2 = cube_maker((0, 1, 0), (10, 10, 10), center=(100, 0, 0))
# /Cube Actors
# Buttons
icon_files = dict()
icon_files['stop'] = read_viz_icons(fname='stop2.png')
icon_files['play'] = read_viz_icons(fname='play3.png')
icon_files['plus'] = read_viz_icons(fname='plus.png')
icon_files['cross'] = read_viz_icons(fname='cross.png')
button_example = ui.Button2D(icon_fnames=icon_files)
def left_mouse_button_click(i_ren, obj, button):
print("Left Button Clicked")
def left_mouse_button_drag(i_ren, obj, button):
print ("Left Button Dragged")
button_example.on_left_mouse_button_drag = left_mouse_button_drag
button_example.on_left_mouse_button_pressed = left_mouse_button_click
def right_mouse_button_drag(i_ren, obj, button):
print("Right Button Dragged")
def right_mouse_button_click(i_ren, obj, button):
print ("Right Button Clicked")
button_example.on_right_mouse_button_drag = right_mouse_button_drag
button_example.on_right_mouse_button_pressed = right_mouse_button_click
second_button_example = ui.Button2D(icon_fnames=icon_files)
def modify_button_callback(i_ren, obj, button):
# i_ren: CustomInteractorStyle
# obj: vtkActor picked
# button: Button2D
button.next_icon()
i_ren.force_render()
second_button_example.on_left_mouse_button_pressed = modify_button_callback
# /Buttons
# Panel
panel = ui.Panel2D(center=(440, 90), size=(300, 150), color=(1, 1, 1), align="right")
panel.add_element(button_example, 'relative', (0.2, 0.2))
panel.add_element(second_button_example, 'absolute', (480, 100))
# /Panel
# TextBox
text = ui.TextBox2D(height=3, width=10)
# /TextBox
# Show Manager
current_size = (600, 600)
show_manager = window.ShowManager(size=current_size, title="DIPY UI Example")
show_manager.ren.add(cube_actor_1)
show_manager.ren.add(cube_actor_2)
show_manager.ren.add(panel)
show_manager.ren.add(text)
show_manager.start()
| bsd-3-clause |
jaggu303619/asylum-v2.0 | openerp/addons/purchase_double_validation/__init__.py | 441 | 1090 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_double_validation_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ian-r-rose/SHTOOLS | src/create_wrapper.py | 2 | 5850 | #!/usr/bin/env python
"""
Automatically creates python-wrapper subroutines from the interface file
SHTOOLS.f95. Unfortunately all assumed array shapes have to be changed because
their structure is only known by the Fortran compiler and cannot be directly
exposed to C. It is possible that newer f2py versions can handle assumed array
shapes using a similar procedure.
"""
#==== IMPORTS ====
from numpy.f2py import crackfortran
import re
from copy import deepcopy
#==== MAIN FUNCTION ====
def main():
fname_fortran = 'SHTOOLS.f95'
fname_wrapper = 'PythonWrapper.f95'
outfile = open(fname_wrapper, 'w')
print 'now cracking Fortran file SHTOOLS.f95 using f2py function...'
crackfortran.verbose = False
crackfortran.dolowercase = False
cracked_shtools = crackfortran.crackfortran(fname_fortran)
print 'descending through shtools module tree...'
module = cracked_shtools[0]
interface_old = module['body'][0]
interface_new = deepcopy(interface_old)
for subroutine in interface_new['body']:
modify_subroutine(subroutine)
print 'create interface string...'
wrapper = crackfortran.crack2fortran(interface_new)
wrapperlines = wrapper.split('\n')
print 'add implicit none statements'
# search for the indices of 'use shtools,' to insert 'implicit none' after
iusestatement = [iline for iline, line in enumerate(wrapperlines) if 'use shtools,' in line]
assert len(iusestatement) == len(interface_new['body']), 'number of subroutines don\'t match'
for iline in iusestatement[::-1]:
wrapperlines.insert(iline + 1, 2 * crackfortran.tabchar + 'implicit none')
print 'add shtools subroutine calls...'
# search for the indices of 'end subroutine'
iendsubroutine = [iline for iline, line in enumerate(wrapperlines)
if 'end subroutine' in line or 'end function' in line]
assert len(iendsubroutine) == len(interface_new['body']), 'number of subroutines don\'t match'
# insert call statements before 'end subroutine' line starting from the end such that we
# don't change the preceding indices
for sroutine_new, sroutine_old, iline in zip(interface_new['body'],
interface_old['body'],
iendsubroutine)[::-1]:
if sroutine_new['block'] == 'function':
newline = 2 * crackfortran.tabchar +\
'%s=%s(' % (sroutine_new['name'], sroutine_old['name']) +\
','.join(sroutine_old['args']) + ')'
elif sroutine_new['block'] == 'subroutine':
newline = 2 * crackfortran.tabchar +\
'call %s(' % sroutine_old['name'] +\
','.join(sroutine_old['args']) + ')'
wrapperlines.insert(iline + 1, '')
wrapperlines.insert(iline, newline)
print 'writing wrapper to file %s' % fname_wrapper
for iline, line in enumerate(wrapperlines):
try:
firstword = line.split()[0]
secondword = line.split()[1]
words = ['real*8', 'integer', 'integer(kind=4)', 'character*(*)', 'complex*16']
for word in words:
if (firstword == word and secondword[0] != ':') or secondword[0] == ',':
line = line.replace(word, word + ',')
wrapperlines[iline] = line
except IndexError:
pass
for line in wrapperlines[4:-5]:
line = line.replace('! in SHTOOLS.f95:SHTOOLS:unknown_interface', '')
if len(line) <= 100:
outfile.write(line + '\n')
else:
elems = line.split(',')
newline = elems[0]
for elem in elems[1:]:
if len(newline) > 100:
outfile.write(newline + '&\n')
newline = ' ' * len(elems[0])
newline += ',' + elem
outfile.write(newline + '\n')
outfile.close()
print '\n==== ALL DONE ====\n'
#==== FUNCTIONS ====
def modify_subroutine(subroutine):
"""loops through variables of a subroutine and modifies them"""
# print '\n----',subroutine['name'],'----'
#-- use original function from shtools:
subroutine['use'] = {'shtools': {'map': {subroutine['name']: subroutine['name']}, 'only': 1}}
#-- loop through variables:
for varname, varattribs in subroutine['vars'].items():
#-- prefix function return variables with 'py'
if varname == subroutine['name']:
subroutine['vars']['py' + varname] = subroutine['vars'].pop(varname)
varname = 'py' + varname
# print 'prefix added:',varname
#-- change assumed to explicit:
if has_assumed_shape(varattribs):
make_explicit(subroutine, varname, varattribs)
# print 'assumed shape variable modified to:',varname,varattribs['dimension']
#-- add py prefix to subroutine:
subroutine['name'] = 'py' + subroutine['name']
def make_explicit(subroutine, varname, varattribs):
dimattribs = {'attrspec': [], 'typespec': 'integer', 'intent': ['in']}
for idim, size in enumerate(varattribs['dimension']):
if size == ':':
# change assumed array to explicit
dimname = '%s_d%d' % (varname, idim)
varattribs['dimension'][idim] = dimname
# declare dimension in subroutine variables
subroutine['vars'][dimname] = dimattribs
# add dimension to subroutine arguments
subroutine['args'].append(dimname)
def has_assumed_shape(varattribs):
"""checks if variable has assumed shape"""
try:
if ':' in varattribs['dimension']:
return True
else:
return False
except KeyError:
return False
#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
dasmarci/check_mk | a10/checks/a10_cpu.py | 1 | 1914 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
factory_settings["a10_cpu_default_levels"] = {'cpu': (80.0, 90.0),}
a10_cpus = {
0: "average",
1: "Control",
2: "Data"
}
def inventory_a10_cpu(info):
if info:
return [ (None, "a10_cpu_default_levels") ]
def check_a10_cpu(item, params, info):
if info:
warn, crit = params['cpu']
state = 0
infotxt = []
perfdata = []
for cpu in a10_cpus:
if info[0][cpu]:
name = a10_cpus[cpu]
util = saveint(info[0][cpu])
label = ""
perfdata.append(('"%s"' % name, util, warn, crit, 0, 100))
if util >= crit:
state = max(2, state)
label = "(!!)"
elif util >= warn:
state = max(1, state)
label = "(!)"
infotxt.append("%s CPU %d%%%s" %(name, util, label))
return (state, ", ".join(infotxt) + " (levels at %.1f/%.1f)" % (warn, crit), perfdata)
return (3, "No data received for %s" % item)
check_info['a10_cpu'] = {
"check_function" : check_a10_cpu,
"inventory_function" : inventory_a10_cpu,
"has_perfdata" : True,
"service_description" : "CPU utilization",
"group" : 'cpu_utilization',
"default_levels_variable" : "a10_cpu_default_levels",
"snmp_scan_function" : lambda oid: "AX Series" in oid(".1.3.6.1.2.1.1.1.0"),
"snmp_info" : ('.1.3.6.1.4.1.22610.2.4.1.3',
[
'3', # A10AvgCpuUsage
'4', # A10CtrlCpuUsage
'5' # A10DataCpuUsage
]),
}
| gpl-2.0 |
anirudhSK/chromium | build/android/screenshot.py | 3 | 3317 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Takes a screenshot or a screen video capture from an Android device."""
import logging
import optparse
import os
import sys
from pylib import android_commands
from pylib import screenshot
def _PrintMessage(heading, eol='\n'):
sys.stdout.write('%s%s' % (heading, eol))
sys.stdout.flush()
def _CaptureScreenshot(adb, host_file):
host_file = adb.TakeScreenshot(host_file)
_PrintMessage('Screenshot written to %s' % os.path.abspath(host_file))
def _CaptureVideo(adb, host_file, options):
size = tuple(map(int, options.size.split('x'))) if options.size else None
recorder = screenshot.VideoRecorder(adb,
host_file,
megabits_per_second=options.bitrate,
size=size,
rotate=options.rotate)
try:
recorder.Start()
_PrintMessage('Recording. Press Enter to stop...', eol='')
raw_input()
finally:
recorder.Stop()
host_file = recorder.Pull()
_PrintMessage('Video written to %s' % os.path.abspath(host_file))
def main():
# Parse options.
parser = optparse.OptionParser(description=__doc__,
usage='screenshot.py [options] [filename]')
parser.add_option('-d', '--device', metavar='ANDROID_DEVICE', help='Serial '
'number of Android device to use.', default=None)
parser.add_option('-f', '--file', help='Save result to file instead of '
'generating a timestamped file name.', metavar='FILE')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
video_options = optparse.OptionGroup(parser, 'Video capture')
video_options.add_option('--video', help='Enable video capturing. Requires '
'Android KitKat or later', action='store_true')
video_options.add_option('-b', '--bitrate', help='Bitrate in megabits/s, '
'from 0.1 to 100 mbps, %default mbps by default.',
default=4, type='float')
video_options.add_option('-r', '--rotate', help='Rotate video by 90 degrees.',
default=False, action='store_true')
video_options.add_option('-s', '--size', metavar='WIDTHxHEIGHT',
help='Frame size to use instead of the device '
'screen size.', default=None)
parser.add_option_group(video_options)
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if not options.device and len(android_commands.GetAttachedDevices()) > 1:
parser.error('Multiple devices are attached. '
'Please specify device serial number with --device.')
if len(args) > 1:
parser.error('Too many positional arguments.')
host_file = args[0] if args else options.file
adb = android_commands.AndroidCommands(options.device)
if options.video:
_CaptureVideo(adb, host_file, options)
else:
_CaptureScreenshot(adb, host_file)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
xavierwu/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
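# A minimal sketch of one possible solution (hyperparameters are illustrative,
# not the prescribed answer; variable names match those the skeleton expects):
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),   # character n-gram frequencies as features
    ('clf', Perceptron()), # simple linear classifier on top
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)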
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
rogst/datadogpy | tests/unit/api/helper.py | 3 | 3033 | # python
import unittest
# datadog
from datadog import initialize, api
from datadog.api.base import CreateableAPIResource, UpdatableAPIResource, DeletableAPIResource,\
GetableAPIResource, ListableAPIResource, ActionAPIResource
from datadog.util.compat import iteritems, json
# 3p
import requests
from mock import patch, Mock
API_KEY = "apikey"
APP_KEY = "applicationkey"
API_HOST = "host"
HOST_NAME = "agent.hostname"
FAKE_PROXY = {
"https": "http://user:pass@10.10.1.10:3128/",
}
class MockReponse(requests.Response):
content = None
def raise_for_status(self):
pass
# A few API Resources
class MyCreatable(CreateableAPIResource):
_class_url = '/creatables'
class MyUpdatable(UpdatableAPIResource):
_class_url = '/updatables'
class MyGetable(GetableAPIResource):
_class_url = '/getables'
class MyListable(ListableAPIResource):
_class_url = '/listables'
class MyDeletable(DeletableAPIResource):
_class_url = '/deletables'
class MyActionable(ActionAPIResource):
_class_url = '/actionables'
@classmethod
def trigger_class_action(cls, method, name, id=None, **params):
super(MyActionable, cls)._trigger_class_action(method, name, id, **params)
@classmethod
def trigger_action(cls, method, name, id=None, **params):
super(MyActionable, cls)._trigger_action(method, name, id, **params)
# Test classes
class DatadogAPITestCase(unittest.TestCase):
def setUp(self):
# Mock patch requests
self.request_patcher = patch('requests.Session')
request_class_mock = self.request_patcher.start()
self.request_mock = request_class_mock.return_value
self.request_mock.request = Mock(return_value=MockReponse())
def get_request_data(self):
"""
"""
_, kwargs = self.request_mock.request.call_args
return json.loads(kwargs['data'])
def request_called_with(self, method, url, data=None, params=None):
(req_method, req_url), others = self.request_mock.request.call_args
assert method == req_method, req_method
assert url == req_url, req_url
if data:
assert 'data' in others
assert json.dumps(data) == others['data'], others['data']
if params:
assert 'params' in others
for (k, v) in iteritems(params):
assert k in others['params'], others['params']
assert v == others['params'][k]
def tearDown(self):
self.request_patcher.stop()
class DatadogAPINoInitialization(DatadogAPITestCase):
def tearDown(self):
super(DatadogAPINoInitialization, self).tearDown()
# Restore default values
api._api_key = None
api._application_key = None
api._api_host = None
api._host_name = None
class DatadogAPIWithInitialization(DatadogAPITestCase):
def setUp(self):
super(DatadogAPIWithInitialization, self).setUp()
initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)
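# Hypothetical usage sketch: a concrete test case would subclass the helper,
# trigger a resource call and assert on the captured request, e.g.
#
#   class MyCreatableTest(DatadogAPIWithInitialization):
#       def test_create(self):
#           MyCreatable.create(mydata="val")
#           assert self.get_request_data() == {'mydata': 'val'}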
| bsd-3-clause |
beni55/edx-platform | common/djangoapps/student/migrations/0032_add_field_UserProfile_country_add_field_UserProfile_city.py | 114 | 10820 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.country'
db.add_column('auth_userprofile', 'country',
self.gf('django_countries.fields.CountryField')(max_length=2, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.city'
db.add_column('auth_userprofile', 'city',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.country'
db.delete_column('auth_userprofile', 'country')
# Deleting field 'UserProfile.city'
db.delete_column('auth_userprofile', 'city')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
auferack08/edx-platform | common/lib/xmodule/xmodule/tests/test_capa_module.py | 15 | 74271 | # -*- coding: utf-8 -*-
"""
Tests of the Capa XModule
"""
# pylint: disable=C0111
# pylint: disable=R0904
# pylint: disable=C0103
# pylint: disable=C0302
import datetime
import json
import random
import os
import textwrap
import unittest
from mock import Mock, patch
import webob
from webob.multidict import MultiDict
import xmodule
from xmodule.tests import DATA_DIR
from capa.responsetypes import (StudentInputError, LoncapaProblemError,
ResponseError)
from capa.xqueue_interface import XQueueInterface
from xmodule.capa_module import CapaModule, ComplexEncoder
from opaque_keys.edx.locations import Location
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from . import get_test_system
from pytz import UTC
from capa.correctmap import CorrectMap
class CapaFactory(object):
"""
A helper class to create problem modules with various parameters for testing.
"""
sample_problem_xml = textwrap.dedent("""\
<?xml version="1.0"?>
<problem>
<text>
<p>What is pi, to two decimal places?</p>
</text>
<numericalresponse answer="3.14">
<textline math="1" size="30"/>
</numericalresponse>
</problem>
""")
num = 0
@classmethod
def next_num(cls):
cls.num += 1
return cls.num
@classmethod
def input_key(cls, response_num=2, input_num=1):
"""
Return the input key to use when passing GET parameters
"""
return ("input_" + cls.answer_key(response_num, input_num))
@classmethod
def answer_key(cls, response_num=2, input_num=1):
"""
Return the key stored in the capa problem answer dict
"""
return (
"%s_%d_%d" % (
"-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]),
response_num,
input_num
)
)
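# For example, the first factory-created problem yields the answer key
# "i4x-edX-capa_test-problem-SampleProblem1_2_1" and the corresponding
# GET parameter name "input_i4x-edX-capa_test-problem-SampleProblem1_2_1".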
@classmethod
def create(cls,
attempts=None,
problem_state=None,
correct=False,
xml=None,
**kwargs
):
"""
All parameters are optional, and are added to the created problem if specified.
Arguments:
graceperiod:
due:
max_attempts:
showanswer:
force_save_button:
rerandomize: all strings, as specified in the policy for the problem
problem_state: a dict to to be serialized into the instance_state of the
module.
attempts: also added to instance state. Will be converted to an int.
"""
location = Location(
"edX",
"capa_test",
"2012_Fall",
"problem",
"SampleProblem{0}".format(cls.next_num()),
None
)
if xml is None:
xml = cls.sample_problem_xml
field_data = {'data': xml}
field_data.update(kwargs)
descriptor = Mock(weight="1")
if problem_state is not None:
field_data.update(problem_state)
if attempts is not None:
# converting to int here because I keep putting "0" and "1" in the tests
# since everything else is a string.
field_data['attempts'] = int(attempts)
system = get_test_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
module = CapaModule(
descriptor,
system,
DictFieldData(field_data),
ScopeIds(None, None, location, location),
)
if correct:
# TODO: probably better to actually set the internal state properly, but...
module.get_score = lambda: {'score': 1, 'total': 1}
else:
module.get_score = lambda: {'score': 0, 'total': 1}
return module
class CapaFactoryWithFiles(CapaFactory):
"""
A factory for creating a Capa problem with files attached.
"""
sample_problem_xml = textwrap.dedent("""\
<problem>
<coderesponse queuename="BerkeleyX-cs188x">
<!-- actual filenames here don't matter for server-side tests,
they are only acted upon in the browser. -->
<filesubmission
points="25"
allowed_files="prog1.py prog2.py prog3.py"
required_files="prog1.py prog2.py prog3.py"
/>
<codeparam>
<answer_display>
If you're having trouble with this Project,
please refer to the Lecture Slides and attend office hours.
</answer_display>
<grader_payload>{"project": "p3"}</grader_payload>
</codeparam>
</coderesponse>
<customresponse>
<text>
If you worked with a partner, enter their username or email address. If you
worked alone, enter None.
</text>
<textline points="0" size="40" correct_answer="Your partner's username or 'None'"/>
<answer type="loncapa/python">
correct=['correct']
s = str(submission[0]).strip()
if submission[0] == '':
correct[0] = 'incorrect'
</answer>
</customresponse>
</problem>
""")
class CapaModuleTest(unittest.TestCase):
def setUp(self):
now = datetime.datetime.now(UTC)
day_delta = datetime.timedelta(days=1)
self.yesterday_str = str(now - day_delta)
self.today_str = str(now)
self.tomorrow_str = str(now + day_delta)
# in the capa grace period format, not in time delta format
self.two_day_delta_str = "2 days"
def test_import(self):
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
other_module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
def test_correct(self):
"""
Check that the factory creates correct and incorrect problems properly.
"""
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
other_module = CapaFactory.create(correct=True)
self.assertEqual(other_module.get_score()['score'], 1)
def test_showanswer_default(self):
"""
Make sure the show answer logic does the right thing.
"""
# default, no due date, showanswer 'closed', so problem is open, and show_answer
# not visible.
problem = CapaFactory.create()
self.assertFalse(problem.answer_available())
def test_showanswer_attempted(self):
problem = CapaFactory.create(showanswer='attempted')
self.assertFalse(problem.answer_available())
problem.attempts = 1
self.assertTrue(problem.answer_available())
def test_showanswer_closed(self):
# can see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertTrue(used_all_attempts.answer_available())
# can see after due date
after_due_date = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(after_due_date.answer_available())
# can't see because attempts left
attempts_left_open = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# Can't see because grace period hasn't expired
still_in_grace = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_correct_or_past_due(self):
"""
With showanswer="correct_or_past_due" should show answer after the answer is correct
or after the problem is closed for everyone--e.g. after due date + grace period.
"""
# can see because answer is correct, even with due date in the future
answer_correct = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.tomorrow_str,
correct=True)
self.assertTrue(answer_correct.answer_available())
# can see after due date, even when answer isn't correct
past_due_date = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can also see after due date when answer _is_ correct
past_due_date_correct = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str,
correct=True)
self.assertTrue(past_due_date_correct.answer_available())
# Can't see because grace period hasn't expired and answer isn't correct
still_in_grace = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_past_due(self):
"""
With showanswer="past_due" should only show answer after the problem is closed
for everyone--e.g. after due date + grace period.
"""
# can't see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertFalse(used_all_attempts.answer_available())
# can see after due date
past_due_date = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can't see because attempts left
attempts_left_open = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# Can't see because grace period hasn't expired, even though have no more
# attempts.
still_in_grace = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_finished(self):
"""
With showanswer="finished" should show answer after the problem is closed,
or after the answer is correct.
"""
# can see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertTrue(used_all_attempts.answer_available())
# can see after due date
past_due_date = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can't see because attempts left and wrong
attempts_left_open = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# _can_ see because attempts left and right
correct_ans = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.tomorrow_str,
correct=True)
self.assertTrue(correct_ans.answer_available())
# Can see even though grace period hasn't expired, because have no more
# attempts.
still_in_grace = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertTrue(still_in_grace.answer_available())
def test_closed(self):
# Attempts < Max attempts --> NOT closed
module = CapaFactory.create(max_attempts="1", attempts="0")
self.assertFalse(module.closed())
# Attempts < Max attempts --> NOT closed
module = CapaFactory.create(max_attempts="2", attempts="1")
self.assertFalse(module.closed())
# Attempts = Max attempts --> closed
module = CapaFactory.create(max_attempts="1", attempts="1")
self.assertTrue(module.closed())
# Attempts > Max attempts --> closed
module = CapaFactory.create(max_attempts="1", attempts="2")
self.assertTrue(module.closed())
# Max attempts = 0 --> closed
module = CapaFactory.create(max_attempts="0", attempts="2")
self.assertTrue(module.closed())
# Past due --> closed
module = CapaFactory.create(max_attempts="1", attempts="0",
due=self.yesterday_str)
self.assertTrue(module.closed())
def test_due_date_extension(self):
module = CapaFactory.create(
max_attempts="1", attempts="0", due=self.yesterday_str,
extended_due=self.tomorrow_str)
self.assertFalse(module.closed())
def test_parse_get_params(self):
# Valid GET param dict
# 'input_5' intentionally left unset,
valid_get_dict = MultiDict({
'input_1': 'test',
'input_1_2': 'test',
'input_1_2_3': 'test',
'input_[]_3': 'test',
'input_4': None,
'input_6': 5
})
result = CapaModule.make_dict_of_responses(valid_get_dict)
# Expect that we get a dict with "input" stripped from key names
# and that we get the same values back
for key in result.keys():
original_key = "input_" + key
self.assertTrue(original_key in valid_get_dict,
"Output dict should have key %s" % original_key)
self.assertEqual(valid_get_dict[original_key], result[key])
# Valid GET param dict with list keys
# Each tuple represents a single parameter in the query string
valid_get_dict = MultiDict((('input_2[]', 'test1'), ('input_2[]', 'test2')))
result = CapaModule.make_dict_of_responses(valid_get_dict)
self.assertTrue('2' in result)
self.assertEqual(['test1', 'test2'], result['2'])
# If we use [] at the end of a key name, we should always
# get a list, even if there's just one value
valid_get_dict = MultiDict({'input_1[]': 'test'})
result = CapaModule.make_dict_of_responses(valid_get_dict)
self.assertEqual(result['1'], ['test'])
# If we have no underscores in the name, then the key is invalid
invalid_get_dict = MultiDict({'input': 'test'})
with self.assertRaises(ValueError):
result = CapaModule.make_dict_of_responses(invalid_get_dict)
# Two equivalent names (one list, one non-list)
# One of the values would overwrite the other, so detect this
# and raise an exception
invalid_get_dict = MultiDict({'input_1[]': 'test 1',
'input_1': 'test 2'})
with self.assertRaises(ValueError):
result = CapaModule.make_dict_of_responses(invalid_get_dict)
def test_check_problem_correct(self):
module = CapaFactory.create(attempts=1)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching CorrectMap.is_correct()
# Also simulate rendering the HTML
# TODO: pep8 thinks the following line has invalid syntax
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct, \
patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_is_correct.return_value = True
mock_html.return_value = "Test HTML"
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get the (mocked) HTML
self.assertEqual(result['contents'], 'Test HTML')
# Expect that the number of attempts is incremented by 1
self.assertEqual(module.attempts, 2)
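# Note on the patching style above: chaining two patch() context managers in
# a single `with` statement requires Python 2.7+; on older interpreters the
# equivalent would be contextlib.nested() or two nested `with` blocks. Both
# mocks are restored automatically when the block exits.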
def test_check_problem_incorrect(self):
module = CapaFactory.create(attempts=0)
# Simulate marking the input incorrect
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
mock_is_correct.return_value = False
# Check the problem
get_request_dict = {CapaFactory.input_key(): '0'}
result = module.check_problem(get_request_dict)
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is incremented by 1
self.assertEqual(module.attempts, 1)
def test_check_problem_closed(self):
module = CapaFactory.create(attempts=3)
# Problem closed -- cannot submit
# Simulate that CapaModule.closed() always returns True
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
with self.assertRaises(xmodule.exceptions.NotFoundError):
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that number of attempts NOT incremented
self.assertEqual(module.attempts, 3)
def test_check_problem_resubmitted_with_randomize(self):
rerandomize_values = ['always', 'true']
for rerandomize in rerandomize_values:
# Randomize turned on
module = CapaFactory.create(rerandomize=rerandomize, attempts=0)
# Simulate that the problem is completed
module.done = True
# Expect that we cannot submit
with self.assertRaises(xmodule.exceptions.NotFoundError):
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that number of attempts NOT incremented
self.assertEqual(module.attempts, 0)
def test_check_problem_resubmitted_no_randomize(self):
rerandomize_values = ['never', 'false', 'per_student']
for rerandomize in rerandomize_values:
# Randomize turned off
module = CapaFactory.create(rerandomize=rerandomize, attempts=0, done=True)
# Expect that we can submit successfully
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
self.assertEqual(result['success'], 'correct')
# Expect that number of attempts IS incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_queued(self):
module = CapaFactory.create(attempts=1)
# Simulate that the problem is queued
with patch('capa.capa_problem.LoncapaProblem.is_queued') \
as mock_is_queued, \
patch('capa.capa_problem.LoncapaProblem.get_recentmost_queuetime') \
as mock_get_queuetime:
mock_is_queued.return_value = True
mock_get_queuetime.return_value = datetime.datetime.now(UTC)
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue('You must wait' in result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
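# The 'You must wait' substring check is deliberately loose: the full message
# presumably embeds the remaining queue wait time (derived from the mocked
# get_recentmost_queuetime), so only the stable prefix is asserted.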
def test_check_problem_with_files(self):
# Check a problem with uploaded files, using the check_problem API.
# pylint: disable=W0212
# The files we'll be uploading.
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
module = CapaFactoryWithFiles.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok"))
module.system.xqueue['interface'] = xqueue_interface
# Create a request dictionary for check_problem.
get_request_dict = {
CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
CapaFactoryWithFiles.input_key(response_num=3): 'None',
}
module.check_problem(get_request_dict)
# _http_post is called like this:
# _http_post(
# 'http://example.com/xqueue/xqueue/submit/',
# {
# 'xqueue_header': '{"lms_key": "df34fb702620d7ae892866ba57572491", "lms_callback_url": "/", "queue_name": "BerkeleyX-cs188x"}',
# 'xqueue_body': '{"student_info": "{\\"anonymous_student_id\\": \\"student\\", \\"submission_time\\": \\"20131117183318\\"}", "grader_payload": "{\\"project\\": \\"p3\\"}", "student_response": ""}',
# },
# files={
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/asset.html'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/asset.html', mode 'r' at 0x49c5f60>,
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/image.jpg'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/image.jpg', mode 'r' at 0x49c56f0>,
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/textbook.pdf'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/textbook.pdf', mode 'r' at 0x49c5a50>,
# },
# )
self.assertEqual(xqueue_interface._http_post.call_count, 1)
_, kwargs = xqueue_interface._http_post.call_args
self.assertItemsEqual(fpaths, kwargs['files'].keys())
for fpath, fileobj in kwargs['files'].iteritems():
self.assertEqual(fpath, fileobj.name)
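# assertItemsEqual is the Python 2 name for assertCountEqual: it compares the
# two sequences ignoring order, which matters because dict key order is not
# guaranteed. An equivalent order-insensitive sketch without it:
#
#     self.assertEqual(sorted(fpaths), sorted(kwargs['files'].keys()))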
def test_check_problem_with_files_as_xblock(self):
# Check a problem with uploaded files, using the XBlock API.
# pylint: disable=W0212
# The files we'll be uploading.
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
module = CapaFactoryWithFiles.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok"))
module.system.xqueue['interface'] = xqueue_interface
# Create a webob Request with the files uploaded.
post_data = []
for fname, fileobj in zip(fnames, fileobjs):
post_data.append((CapaFactoryWithFiles.input_key(response_num=2), (fname, fileobj)))
post_data.append((CapaFactoryWithFiles.input_key(response_num=3), 'None'))
request = webob.Request.blank("/some/fake/url", POST=post_data, content_type='multipart/form-data')
module.handle('xmodule_handler', request, 'problem_check')
self.assertEqual(xqueue_interface._http_post.call_count, 1)
_, kwargs = xqueue_interface._http_post.call_args
self.assertItemsEqual(fnames, kwargs['files'].keys())
for fpath, fileobj in kwargs['files'].iteritems():
self.assertEqual(fpath, fileobj.name)
def test_check_problem_error(self):
# Try each exception that capa_module should handle
exception_classes = [StudentInputError,
LoncapaProblemError,
ResponseError]
for exception_class in exception_classes:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class('test error')
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
expected_msg = 'Error: test error'
self.assertEqual(expected_msg, result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_other_errors(self):
"""
Test that errors other than the expected kinds give an appropriate message.
See also `test_check_problem_error` for the "expected kinds" of errors.
"""
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Ensure that DEBUG is on
module.system.DEBUG = True
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
error_msg = u"Superterrible error happened: ☠"
mock_grade.side_effect = Exception(error_msg)
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue(error_msg in result['success'])
def test_check_problem_error_nonascii(self):
# Try each exception that capa_module should handle
exception_classes = [StudentInputError,
LoncapaProblemError,
ResponseError]
for exception_class in exception_classes:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class(u"ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ")
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ'
self.assertEqual(expected_msg, result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_error_with_staff_user(self):
# Try each exception that capa module should handle
for exception_class in [StudentInputError,
LoncapaProblemError,
ResponseError]:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user IS staff
module.system.user_is_staff = True
# Simulate answering a problem that raises an exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class('test error')
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue('test error' in result['success'])
# We DO include traceback information for staff users
self.assertTrue('Traceback' in result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_reset_problem(self):
module = CapaFactory.create(done=True)
module.new_lcp = Mock(wraps=module.new_lcp)
module.choose_new_seed = Mock(wraps=module.choose_new_seed)
# Stub out HTML rendering
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_html.return_value = "<div>Test HTML</div>"
# Reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the request was successful
self.assertTrue('success' in result and result['success'])
# Expect that the problem HTML is retrieved
self.assertTrue('html' in result)
self.assertEqual(result['html'], "<div>Test HTML</div>")
# Expect that the problem was reset
module.new_lcp.assert_called_once_with(None)
def test_reset_problem_closed(self):
# pre studio default
module = CapaFactory.create(rerandomize="always")
# Simulate that the problem is closed
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
# Try to reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_reset_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
module = CapaFactory.create(attempts=1, done=True)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertFalse('contents' in result)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
# Make sure rescoring also works when attempts have been reset,
# hence attempts=0 in this test:
module = CapaFactory.create(attempts=0, done=True)
# Simulate that all answers are marked incorrect, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Create the module
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self):
module = CapaFactory.create(done=False)
# Save the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that answers are saved to the problem
expected_answers = {CapaFactory.answer_key(): '3.14'}
self.assertEqual(module.lcp.student_answers, expected_answers)
# Expect that the result is success
self.assertTrue('success' in result and result['success'])
def test_save_problem_closed(self):
module = CapaFactory.create(done=False)
# Simulate that the problem is closed
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
# Try to save the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that the result is failure
self.assertTrue('success' in result and not result['success'])
def test_save_problem_submitted_with_randomize(self):
# Capa XModule treats 'always' and 'true' equivalently
rerandomize_values = ['always', 'true']
for rerandomize in rerandomize_values:
module = CapaFactory.create(rerandomize=rerandomize, done=True)
# Try to save
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we cannot save
self.assertTrue('success' in result and not result['success'])
def test_save_problem_submitted_no_randomize(self):
# Capa XModule treats 'false' and 'per_student' equivalently
rerandomize_values = ['never', 'false', 'per_student']
for rerandomize in rerandomize_values:
module = CapaFactory.create(rerandomize=rerandomize, done=True)
# Try to save
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we succeed
self.assertTrue('success' in result and result['success'])
def test_check_button_name(self):
# If last attempt, button name changes to "Final Check"
# Just in case, we also check what happens if we have
# more attempts than allowed.
attempts = random.randint(1, 10)
module = CapaFactory.create(attempts=attempts - 1, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
module = CapaFactory.create(attempts=attempts, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
module = CapaFactory.create(attempts=attempts + 1, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
# Otherwise, button name is "Check"
module = CapaFactory.create(attempts=attempts - 2, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
module = CapaFactory.create(attempts=attempts - 3, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
# If no limit on attempts, then always show "Check"
module = CapaFactory.create(attempts=attempts - 3)
self.assertEqual(module.check_button_name(), "Check")
module = CapaFactory.create(attempts=0)
self.assertEqual(module.check_button_name(), "Check")
def test_check_button_checking_name(self):
module = CapaFactory.create(attempts=1, max_attempts=10)
self.assertEqual(module.check_button_checking_name(), "Checking...")
module = CapaFactory.create(attempts=10, max_attempts=10)
self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_check_button_name_customization(self):
module = CapaFactory.create(
attempts=1,
max_attempts=10,
text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"}
)
self.assertEqual(module.check_button_name(), "Submit")
module = CapaFactory.create(attempts=9,
max_attempts=10,
text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"}
)
self.assertEqual(module.check_button_name(), "Final Submit")
def test_check_button_checking_name_customization(self):
module = CapaFactory.create(
attempts=1,
max_attempts=10,
text_customization={
"custom_check": "Submit",
"custom_final_check": "Final Submit",
"custom_checking": "Checking..."
}
)
self.assertEqual(module.check_button_checking_name(), "Checking...")
module = CapaFactory.create(
attempts=9,
max_attempts=10,
text_customization={
"custom_check": "Submit",
"custom_final_check": "Final Submit",
"custom_checking": "Checking..."
}
)
self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_should_show_check_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show check button
module = CapaFactory.create(due=self.yesterday_str)
self.assertFalse(module.should_show_check_button())
# If user is out of attempts, do NOT show the check button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts)
self.assertFalse(module.should_show_check_button())
# If survey question (max_attempts = 0), do NOT show the check button
module = CapaFactory.create(max_attempts=0)
self.assertFalse(module.should_show_check_button())
# If user submitted a problem but hasn't reset,
# do NOT show the check button
# Note: we can only reset when rerandomize="always" or "true"
module = CapaFactory.create(rerandomize="always", done=True)
self.assertFalse(module.should_show_check_button())
module = CapaFactory.create(rerandomize="true", done=True)
self.assertFalse(module.should_show_check_button())
# Otherwise, DO show the check button
module = CapaFactory.create()
self.assertTrue(module.should_show_check_button())
# If the user has submitted the problem
# and we do NOT have a reset button, then we can show the check button
# Setting rerandomize to "never" or "false" ensures that the reset button
# is not shown
module = CapaFactory.create(rerandomize="never", done=True)
self.assertTrue(module.should_show_check_button())
module = CapaFactory.create(rerandomize="false", done=True)
self.assertTrue(module.should_show_check_button())
module = CapaFactory.create(rerandomize="per_student", done=True)
self.assertTrue(module.should_show_check_button())
def test_should_show_reset_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show the reset button
module = CapaFactory.create(due=self.yesterday_str, done=True)
self.assertFalse(module.should_show_reset_button())
# If the user is out of attempts, do NOT show the reset button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
self.assertFalse(module.should_show_reset_button())
# If we're NOT randomizing, then do NOT show the reset button
module = CapaFactory.create(rerandomize="never", done=True)
self.assertFalse(module.should_show_reset_button())
# If we're NOT randomizing, then do NOT show the reset button
module = CapaFactory.create(rerandomize="per_student", done=True)
self.assertFalse(module.should_show_reset_button())
# If we're NOT randomizing, then do NOT show the reset button
module = CapaFactory.create(rerandomize="false", done=True)
self.assertFalse(module.should_show_reset_button())
# If the user hasn't submitted an answer yet,
# then do NOT show the reset button
module = CapaFactory.create(done=False)
self.assertFalse(module.should_show_reset_button())
# pre studio default value, DO show the reset button
module = CapaFactory.create(rerandomize="always", done=True)
self.assertTrue(module.should_show_reset_button())
# If survey question for capa (max_attempts = 0),
# DO show the reset button
module = CapaFactory.create(rerandomize="always", max_attempts=0, done=True)
self.assertTrue(module.should_show_reset_button())
def test_should_show_save_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show the save button
module = CapaFactory.create(due=self.yesterday_str, done=True)
self.assertFalse(module.should_show_save_button())
# If the user is out of attempts, do NOT show the save button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
self.assertFalse(module.should_show_save_button())
# If user submitted a problem but hasn't reset, do NOT show the save button
module = CapaFactory.create(rerandomize="always", done=True)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(rerandomize="true", done=True)
self.assertFalse(module.should_show_save_button())
# If the user has unlimited attempts and we are not randomizing,
# then do NOT show a save button
# because they can keep using "Check"
module = CapaFactory.create(max_attempts=None, rerandomize="never", done=False)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(max_attempts=None, rerandomize="false", done=True)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(max_attempts=None, rerandomize="per_student", done=True)
self.assertFalse(module.should_show_save_button())
# pre-studio default, DO show the save button
module = CapaFactory.create(rerandomize="always", done=False)
self.assertTrue(module.should_show_save_button())
# If we're not randomizing and we have limited attempts, then we can save
module = CapaFactory.create(rerandomize="never", max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(rerandomize="false", max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(rerandomize="per_student", max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
# If survey question for capa (max_attempts = 0),
# DO show the save button
module = CapaFactory.create(max_attempts=0, done=False)
self.assertTrue(module.should_show_save_button())
def test_should_show_save_button_force_save_button(self):
# If we're after the deadline, do NOT show the save button
# even though we're forcing a save
module = CapaFactory.create(due=self.yesterday_str,
force_save_button="true",
done=True)
self.assertFalse(module.should_show_save_button())
# If the user is out of attempts, do NOT show the save button
attempts = random.randint(1, 10)
module = CapaFactory.create(attempts=attempts,
max_attempts=attempts,
force_save_button="true",
done=True)
self.assertFalse(module.should_show_save_button())
# Otherwise, if we force the save button,
# then show it even if we would ordinarily
# require a reset first
module = CapaFactory.create(force_save_button="true",
rerandomize="always",
done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(force_save_button="true",
rerandomize="true",
done=True)
self.assertTrue(module.should_show_save_button())
def test_no_max_attempts(self):
module = CapaFactory.create(max_attempts='')
html = module.get_problem_html()
self.assertTrue(html is not None)
# assert that we got here without exploding
def test_get_problem_html(self):
module = CapaFactory.create()
# We've tested the show/hide button logic in other tests,
# so here we hard-wire the values
show_check_button = bool(random.randint(0, 1))
show_reset_button = bool(random.randint(0, 1))
show_save_button = bool(random.randint(0, 1))
module.should_show_check_button = Mock(return_value=show_check_button)
module.should_show_reset_button = Mock(return_value=show_reset_button)
module.should_show_save_button = Mock(return_value=show_save_button)
# Mock the system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Patch the capa problem's HTML rendering
with patch('capa.capa_problem.LoncapaProblem.get_html') as mock_html:
mock_html.return_value = "<div>Test Problem HTML</div>"
# Render the problem HTML
html = module.get_problem_html(encapsulate=False)
# Also render the problem encapsulated in a <div>
html_encapsulated = module.get_problem_html(encapsulate=True)
# Expect that we get the rendered template back
self.assertEqual(html, "<div>Test Template HTML</div>")
# Check the rendering context
render_args, _ = module.system.render_template.call_args
self.assertEqual(len(render_args), 2)
template_name = render_args[0]
self.assertEqual(template_name, "problem.html")
context = render_args[1]
self.assertEqual(context['problem']['html'], "<div>Test Problem HTML</div>")
self.assertEqual(bool(context['check_button']), show_check_button)
self.assertEqual(bool(context['reset_button']), show_reset_button)
self.assertEqual(bool(context['save_button']), show_save_button)
# Assert that the encapsulated html contains the original html
self.assertTrue(html in html_encapsulated)
def test_input_state_consistency(self):
module1 = CapaFactory.create()
module2 = CapaFactory.create()
# Check that each module's input_state has the same keys as its lcp inputs
module1.set_state_from_lcp()
self.assertEqual(module1.lcp.inputs.keys(), module1.input_state.keys())
module2.set_state_from_lcp()
intersection = set(module2.input_state.keys()).intersection(set(module1.input_state.keys()))
self.assertEqual(len(intersection), 0)
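# The empty intersection holds because each CapaFactory.create() call is
# assumed to mint a fresh problem number, so the generated input-state keys
# (which embed that number) never collide between module1 and module2.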
def test_get_problem_html_error(self):
"""
In production, when an error occurs with the problem HTML
rendering, a "dummy" problem is created with an error
message to display to the user.
"""
module = CapaFactory.create()
# Save the original problem so we can compare it later
original_problem = module.lcp
# Simulate throwing an exception when the capa problem
# is asked to render itself as HTML
module.lcp.get_html = Mock(side_effect=Exception("Test"))
# Stub out the get_test_system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Turn off DEBUG
module.system.DEBUG = False
# Try to render the module with DEBUG turned off
html = module.get_problem_html()
self.assertTrue(html is not None)
# Check the rendering context
render_args, _ = module.system.render_template.call_args
context = render_args[1]
self.assertTrue("error" in context['problem']['html'])
# Expect that the module has created a new dummy problem with the error
self.assertNotEqual(original_problem, module.lcp)
def test_get_problem_html_error_w_debug(self):
"""
Test the html response when an error occurs with DEBUG on
"""
module = CapaFactory.create()
# Simulate throwing an exception when the capa problem
# is asked to render itself as HTML
error_msg = u"Superterrible error happened: ☠"
module.lcp.get_html = Mock(side_effect=Exception(error_msg))
# Stub out the get_test_system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Make sure DEBUG is on
module.system.DEBUG = True
# Try to render the module with DEBUG turned on
html = module.get_problem_html()
self.assertTrue(html is not None)
# Check the rendering context
render_args, _ = module.system.render_template.call_args
context = render_args[1]
self.assertTrue(error_msg in context['problem']['html'])
def test_random_seed_no_change(self):
# Run the test for each possible rerandomize value
for rerandomize in ['false', 'never',
'per_student', 'always',
'true', 'onreset']:
module = CapaFactory.create(rerandomize=rerandomize)
# Get the seed
# By this point, the module should have persisted the seed
seed = module.seed
self.assertTrue(seed is not None)
# If we're not rerandomizing, the seed is always set
# to the same value (1)
if rerandomize in ['never']:
self.assertEqual(seed, 1,
msg="Seed should always be 1 when rerandomize='%s'" % rerandomize)
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that the seed is the same
self.assertEqual(seed, module.seed)
# Save the problem
module.save_problem(get_request_dict)
# Expect that the seed is the same
self.assertEqual(seed, module.seed)
def test_random_seed_with_reset(self):
def _reset_and_get_seed(module):
'''
Reset the XModule and return the module's seed
'''
# Simulate submitting an attempt
# We need to do this, or reset_problem() will
# fail with a complaint that we haven't submitted
# the problem yet.
module.done = True
# Reset the problem
module.reset_problem({})
# Return the seed
return module.seed
def _retry_and_check(num_tries, test_func):
'''
Returns True if *test_func* was successful
(returned True) within *num_tries* attempts
*test_func* must be a function
of the form test_func() -> bool
'''
success = False
for i in range(num_tries):
if test_func() is True:
success = True
break
return success
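# _retry_and_check is a plain bounded-retry loop: it calls test_func() up to
# num_tries times and reports whether any call returned True. For example,
# with a hypothetical coin-flip predicate:
#
#     _retry_and_check(3, lambda: random.random() < 0.5)
#
# succeeds with probability 1 - 0.5 ** 3 = 0.875.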
# Run the test for each possible rerandomize value
for rerandomize in ['never', 'false', 'per_student',
'always', 'true', 'onreset']:
module = CapaFactory.create(rerandomize=rerandomize)
# Get the seed
# By this point, the module should have persisted the seed
seed = module.seed
self.assertTrue(seed is not None)
# We do NOT want the seed to reset if rerandomize
# is set to 'never' -- it should still be 1
# The seed also stays the same if we're randomizing
# 'per_student': the same student should see the same problem
if rerandomize in ['never', 'false', 'per_student']:
self.assertEqual(seed, _reset_and_get_seed(module))
# Otherwise, we expect the seed to change
# to another valid seed
else:
# Since there's a small chance we might get the
# same seed again, give it 5 chances
# to generate a different seed
success = _retry_and_check(5, lambda: _reset_and_get_seed(module) != seed)
self.assertTrue(module.seed is not None)
msg = 'Could not get a new seed from reset after 5 tries'
self.assertTrue(success, msg)
def test_random_seed_bins(self):
# Assert that we are limiting the number of possible seeds.
# Check the conditions that generate random seeds
for rerandomize in ['always', 'per_student', 'true', 'onreset']:
# Get a bunch of seeds, they should all be in 0-999.
for i in range(200):
module = CapaFactory.create(rerandomize=rerandomize)
assert 0 <= module.seed < 1000
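# The 0 <= seed < 1000 bound is checked deterministically on every draw, so
# the 200-iteration loop is a smoke test over repeated module creation; it
# demonstrates the binning but does not attempt to verify uniformity.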
@patch('xmodule.capa_base.log')
@patch('xmodule.capa_base.Progress')
def test_get_progress_error(self, mock_progress, mock_log):
"""
Check that an exception given in `Progress` produces a `log.exception` call.
"""
error_types = [TypeError, ValueError]
for error_type in error_types:
mock_progress.side_effect = error_type
module = CapaFactory.create()
self.assertIsNone(module.get_progress())
mock_log.exception.assert_called_once_with('Got bad progress')
mock_log.reset_mock()
@patch('xmodule.capa_base.Progress')
def test_get_progress_no_error_if_weight_zero(self, mock_progress):
"""
Check that if the weight is 0 get_progress does not try to create a Progress object.
"""
mock_progress.return_value = True
module = CapaFactory.create()
module.weight = 0
progress = module.get_progress()
self.assertIsNone(progress)
self.assertFalse(mock_progress.called)
@patch('xmodule.capa_base.Progress')
def test_get_progress_calculate_progress_fraction(self, mock_progress):
"""
Check that score and total are calculated correctly for the progress fraction.
"""
module = CapaFactory.create()
module.weight = 1
module.get_progress()
mock_progress.assert_called_with(0, 1)
other_module = CapaFactory.create(correct=True)
other_module.weight = 1
other_module.get_progress()
mock_progress.assert_called_with(1, 1)
def test_get_html(self):
"""
Check that get_html() calls get_progress() with no arguments.
"""
module = CapaFactory.create()
module.get_progress = Mock(wraps=module.get_progress)
module.get_html()
module.get_progress.assert_called_once_with()
def test_get_problem(self):
"""
Check that get_problem() returns the expected dictionary.
"""
module = CapaFactory.create()
self.assertEquals(module.get_problem("data"), {'html': module.get_problem_html(encapsulate=False)})
# Standard question with shuffle="true" used by a few tests
common_shuffle_xml = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
def test_check_unmask(self):
"""
Check that shuffle unmasking is plumbed through: when check_problem is called,
unmasked names should appear in the track_function event_info.
"""
module = CapaFactory.create(xml=self.common_shuffle_xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'choice_3'} # the correct choice
module.check_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_3')
# 'permutation' key added to record how problem was shown
self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
('shuffle', ['choice_3', 'choice_1', 'choice_2', 'choice_0']))
self.assertEquals(event_info['success'], 'correct')
@unittest.skip("masking temporarily disabled")
def test_save_unmask(self):
"""On problem save, unmasked data should appear on track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.save_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_reset_unmask(self):
"""On problem reset, unmask names should appear track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.check_problem(get_request_dict)
# On reset, 'old_state' should use unmasked names
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.reset_problem(None)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(mock_call[1][0], 'reset_problem')
self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_rescore_unmask(self):
"""On problem rescore, unmasked names should appear on track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.check_problem(get_request_dict)
# On rescore, state/student_answers should use unmasked names
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.rescore_problem()
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(mock_call[1][0], 'problem_rescore')
self.assertEquals(event_info['state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
def test_check_unmask_answerpool(self):
"""Check answer-pool question track_function uses unmasked names"""
xml = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
module = CapaFactory.create(xml=xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'choice_2'} # mask_X form when masking enabled
module.check_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
# 'permutation' key added to record how problem was shown
self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0']))
self.assertEquals(event_info['success'], 'incorrect')
class ComplexEncoderTest(unittest.TestCase):
def test_default(self):
"""
Check that complex numbers can be encoded into JSON.
"""
complex_num = 1 - 1j
expected_str = '1-1*j'
json_str = json.dumps(complex_num, cls=ComplexEncoder)
self.assertEqual(expected_str, json_str[1:-1]) # ignore quotes
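# For reference, an encoder reproducing the behavior asserted above can be
# written in a few lines. This is a hedged sketch, not the actual
# ComplexEncoder implementation, and other values may format differently:
#
#     class ComplexEncoderSketch(json.JSONEncoder):
#         def default(self, obj):
#             if isinstance(obj, complex):
#                 return '%g%+g*j' % (obj.real, obj.imag)
#             return json.JSONEncoder.default(self, obj)
#
#     json.dumps(1 - 1j, cls=ComplexEncoderSketch)  # -> '"1-1*j"'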
class TestProblemCheckTracking(unittest.TestCase):
"""
Ensure correct tracking information is included in events emitted during problem checks.
"""
def setUp(self):
self.maxDiff = None
def test_choice_answer_text(self):
factory = self.capa_factory_for_problem_xml("""\
<problem display_name="Multiple Choice Questions">
<p>What color is the open ocean on a sunny day?</p>
<optionresponse>
<optioninput options="('yellow','blue','green')" correct="blue" label="What color is the open ocean on a sunny day?"/>
</optionresponse>
<p>Which piece of furniture is built for sitting?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">
<text>a table</text>
</choice>
<choice correct="false">
<text>a desk</text>
</choice>
<choice correct="true">
<text>a chair</text>
</choice>
<choice correct="false">
<text>a bookshelf</text>
</choice>
</choicegroup>
</multiplechoiceresponse>
<p>Which of the following are musical instruments?</p>
<choiceresponse>
<checkboxgroup direction="vertical" label="Which of the following are musical instruments?">
<choice correct="true">a piano</choice>
<choice correct="false">a tree</choice>
<choice correct="true">a guitar</choice>
<choice correct="false">a window</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
module = factory.create()
answer_input_dict = {
factory.input_key(2): 'blue',
factory.input_key(3): 'choice_0',
factory.input_key(4): ['choice_0', 'choice_1'],
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': 'What color is the open ocean on a sunny day?',
'answer': 'blue',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': True,
'variant': '',
},
factory.answer_key(3): {
'question': '',
'answer': u'<text>a table</text>',
'response_type': 'multiplechoiceresponse',
'input_type': 'choicegroup',
'correct': False,
'variant': '',
},
factory.answer_key(4): {
'question': 'Which of the following are musical instruments?',
'answer': [u'a piano', u'a tree'],
'response_type': 'choiceresponse',
'input_type': 'checkboxgroup',
'correct': False,
'variant': '',
},
})
def capa_factory_for_problem_xml(self, xml):
class CustomCapaFactory(CapaFactory):
"""
A factory for creating a Capa problem with arbitrary xml.
"""
sample_problem_xml = textwrap.dedent(xml)
return CustomCapaFactory
def get_event_for_answers(self, module, answer_input_dict):
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.check_problem(answer_input_dict)
self.assertEquals(len(mock_track_function.mock_calls), 1)
mock_call = mock_track_function.mock_calls[0]
event = mock_call[1][1]
return event
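# mock.mock_calls entries unpack as (name, args, kwargs) triples, so in the
# helper above mock_call[1] is the positional-argument tuple and
# mock_call[1][1] is the second positional argument, i.e. the event_info
# dict passed to track_function.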
def test_numerical_textline(self):
factory = CapaFactory
module = factory.create()
answer_input_dict = {
factory.input_key(2): '3.14'
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': '3.14',
'response_type': 'numericalresponse',
'input_type': 'textline',
'correct': True,
'variant': '',
}
})
def test_multiple_inputs(self):
factory = self.capa_factory_for_problem_xml("""\
<problem display_name="Multiple Inputs">
<p>Choose the correct color</p>
<optionresponse>
<p>What color is the sky?</p>
<optioninput options="('yellow','blue','green')" correct="blue"/>
<p>What color are pine needles?</p>
<optioninput options="('yellow','blue','green')" correct="green"/>
</optionresponse>
</problem>
""")
module = factory.create()
answer_input_dict = {
factory.input_key(2, 1): 'blue',
factory.input_key(2, 2): 'yellow',
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2, 1): {
'question': '',
'answer': 'blue',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': True,
'variant': '',
},
factory.answer_key(2, 2): {
'question': '',
'answer': 'yellow',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': False,
'variant': '',
},
})
def test_rerandomized_inputs(self):
factory = CapaFactory
module = factory.create(rerandomize='always')
answer_input_dict = {
factory.input_key(2): '3.14'
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': '3.14',
'response_type': 'numericalresponse',
'input_type': 'textline',
'correct': True,
'variant': module.seed,
}
})
def test_file_inputs(self):
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
factory = CapaFactoryWithFiles
module = factory.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok")) # pylint: disable=protected-access
module.system.xqueue['interface'] = xqueue_interface
answer_input_dict = {
CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
CapaFactoryWithFiles.input_key(response_num=3): 'None',
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': fpaths,
'response_type': 'coderesponse',
'input_type': 'filesubmission',
'correct': False,
'variant': '',
},
factory.answer_key(3): {
'answer': 'None',
'correct': True,
'question': '',
'response_type': 'customresponse',
'input_type': 'textline',
'variant': ''
}
})
| agpl-3.0 |
henriquegemignani/py-gocd | docs/conf.py | 2 | 10117 | # -*- coding: utf-8 -*-
#
# GoCD API Client documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 9 00:56:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import gocd
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
autodoc_default_flags = [
'members',
'undoc-members',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GoCD API Client'
copyright = u'2015, Björn Andersson'
author = u'Björn Andersson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = gocd.__version__
# The full version, including alpha/beta/rc tags.
release = gocd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GoCDAPIClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GoCDAPIClient.tex', u'GoCD API Client Documentation',
u'Björn Andersson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gocdapiclient', u'GoCD API Client Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GoCDAPIClient', u'GoCD API Client Documentation',
author, 'GoCDAPIClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def maybe_skip_member(app, what, name, obj, skip, options):
if(name in ['run', 'trigger']
and obj.im_class.__name__ == 'Pipeline'
and obj.im_func.__name__ == 'schedule'):
# return False
# import ipdb; ipdb.set_trace()
print skip, options
return True
# print app, what, name, obj, skip, options
return skip
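# An 'autodoc-skip-member' handler returns True to force-skip a member, False
# to force-include it, or the incoming `skip` value to defer to Sphinx's
# default decision; the connection below is intentionally left commented out.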
# def setup(app):
# app.connect('autodoc-skip-member', maybe_skip_member)
| mit |
rajiteh/taiga-back | tests/integration/resources_permissions/test_auth_resources.py | 24 | 1111 | from django.core.urlresolvers import reverse
from taiga.base.utils import json
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
def test_auth_create(client):
url = reverse('auth-list')
user = f.UserFactory.create()
login_data = json.dumps({
"type": "normal",
"username": user.username,
"password": user.username,
})
result = client.post(url, login_data, content_type="application/json")
assert result.status_code == 200
def test_auth_action_register(client, settings):
settings.PUBLIC_REGISTER_ENABLED = True
url = reverse('auth-register')
register_data = json.dumps({
"type": "public",
"username": "test",
"password": "test",
"full_name": "test",
"email": "test@test.com",
})
result = client.post(url, register_data, content_type="application/json")
assert result.status_code == 201
| agpl-3.0 |
EmanueleCannizzaro/scons | test/Docbook/basic/html/html.py | 3 | 1724 | #!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test the HTML builder.
"""
import TestSCons
test = TestSCons.TestSCons()
try:
import libxml2
import libxslt
except:
try:
import lxml
except:
test.skip_test('Cannot find installed Python binding for libxml2 or lxml, skipping test.\n')
test.dir_fixture('image')
# Normal invocation
test.run()
test.must_exist(test.workpath('manual.html'))
# Cleanup
test.run(arguments='-c')
test.must_not_exist(test.workpath('manual.html'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
joberreiter/pyload | module/plugins/accounts/FastixRu.py | 3 | 2037 | # -*- coding: utf-8 -*-
from module.plugins.internal.Account import Account
from module.common.json_layer import json_loads
class FastixRu(Account):
__name__ = "FastixRu"
__type__ = "account"
__version__ = "0.08"
__status__ = "testing"
__config__ = [("mh_mode" , "all;listed;unlisted", "Filter hosters to use" , "all"),
("mh_list" , "str" , "Hoster list (comma separated)", "" ),
("mh_interval", "int" , "Reload interval in minutes" , 60 )]
__description__ = """Fastix account plugin"""
__license__ = "GPLv3"
__authors__ = [("Massimo Rosamilia", "max@spiritix.eu")]
def grab_hosters(self, user, password, data):
html = self.load("http://fastix.ru/api_v2",
get={'apikey': "5182964c3f8f9a7f0b00000a_kelmFB4n1IrnCDYuIFn2y",
'sub' : "allowed_sources"})
host_list = json_loads(html)
host_list = host_list['allow']
return host_list
def grab_info(self, user, password, data):
html = json_loads(self.load("http://fastix.ru/api_v2/",
get={'apikey': data['apikey'],
'sub' : "getaccountdetails"}))
points = html['points']
kb = float(points) * 1024 ** 2 / 1000
if points > 0:
account_info = {'validuntil': -1, 'trafficleft': kb}
else:
account_info = {'validuntil': None, 'trafficleft': None, 'premium': False}
return account_info
def signin(self, user, password, data):
api = json_loads(self.load("https://fastix.ru/api_v2/",
get={'sub' : "get_apikey",
'email' : user,
'password': password}))
if 'error' in api:
self.fail_login(api['error_txt'])
else:
data['apikey'] = api['apikey']
| gpl-3.0 |
unioslo/cerebrum | Cerebrum/modules/bofhd/errors.py | 1 | 2338 | # -*- coding: utf-8 -*-
# Copyright 2002-2014 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bofh client/server exceptions.
The errors defined in this class, are errors that the bofhd server can
communicate to the client.
All client implementations should be aware of these exception types.
"""
class CerebrumError(StandardError):
""" Signal a user-error. """
pass
class PermissionDenied(CerebrumError):
""" The operation was not permitted. """
pass
class UnknownError(CerebrumError):
""" An unknown error has occured. """
def __init__(self, type, value, msg=None):
""" Wrap a non-L{CerebrumError} in a L{CerebrumError} exception.
@type type: type
@param type: The exception class
@type value: Exception
@param value: The exception instance
@type msg: None or basestring
@param msg:
An additional error message. This message will be prepended to the
string value of this exception.
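        A minimal usage sketch (C{risky_operation} and the message text are
        hypothetical; assumes C{sys} is imported):
            try:
                risky_operation()
            except Exception:
                exc_type, exc_value = sys.exc_info()[:2]
                raise UnknownError(exc_type, exc_value,
                                   msg='while running risky_operation')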
"""
self._type = type
self._value = value
self._msg = msg or ''
def __str__(self):
return "Unknown error (%s): %s" % (getattr(self._type, '__name__', ''),
self._msg)
class ServerRestartedError(CerebrumError):
""" Notify the client that the server has restarted.
When receiving this error, clients should flush any cached data.
"""
pass
class SessionExpiredError(CerebrumError):
""" Indicate that the C{session_id} is expired.
This happens when the received C{session_id} is unknown.
"""
pass
| gpl-2.0 |
Yukarumya/Yukarum-Redfoxes | media/webrtc/trunk/tools/gyp/test/mac/gyptest-postbuild-fail.py | 88 | 2218 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a failing postbuild step lets the build fail.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
test.run_gyp('test.gyp', chdir='postbuild-fail')
build_error_code = {
'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
'make': 2,
'ninja': 1,
'xcode-ninja': [1, 65],
}[test.format]
# If a postbuild fails, all postbuilds should be re-run on the next build.
# In Xcode 3, even if the first postbuild fails the other postbuilds were
# still executed. In Xcode 4, postbuilds are stopped after the first
# failing postbuild. This test checks for the Xcode 4 behavior.
# Ignore this test on Xcode 3.
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
print out
raise Exception('Error %d running xcodebuild' % job.returncode)
if out.startswith('Xcode 3.'):
test.pass_test()
# Non-bundles
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
status=build_error_code)
test.built_file_must_not_exist('static_touch',
chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
status=build_error_code)
# Bundles
test.build('test.gyp', 'bundle', chdir='postbuild-fail',
status=build_error_code)
test.built_file_must_not_exist('dynamic_touch',
chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'bundle', chdir='postbuild-fail',
status=build_error_code)
test.pass_test()
| mpl-2.0 |
kawamon/hue | desktop/core/ext-py/celery-4.2.1/t/unit/worker/test_request.py | 2 | 36038 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import numbers
import os
import signal
import socket
import sys
from datetime import datetime, timedelta
from time import time
import pytest
from billiard.einfo import ExceptionInfo
from case import Mock, patch
from kombu.utils.encoding import (default_encode, from_utf8, safe_repr,
safe_str)
from kombu.utils.uuid import uuid
from celery import states
from celery.app.trace import (TraceInfo, _trace_task_ret, build_tracer,
mro_lookup, reset_worker_optimizations,
setup_worker_optimizations, trace_task)
from celery.exceptions import (Ignore, InvalidTaskError, Reject, Retry,
TaskRevokedError, Terminated, WorkerLostError)
from celery.five import monotonic
from celery.signals import task_revoked
from celery.worker import request as module
from celery.worker import strategy
from celery.worker.request import Request, create_request_cls
from celery.worker.request import logger as req_logger
from celery.worker.state import revoked
class RequestCase:
def setup(self):
self.app.conf.result_serializer = 'pickle'
@self.app.task(shared=False)
def add(x, y, **kw_):
return x + y
self.add = add
@self.app.task(shared=False)
def mytask(i, **kwargs):
return i ** i
self.mytask = mytask
@self.app.task(shared=False)
def mytask_raising(i):
raise KeyError(i)
self.mytask_raising = mytask_raising
def xRequest(self, name=None, id=None, args=None, kwargs=None,
on_ack=None, on_reject=None, Request=Request, **head):
args = [1] if args is None else args
kwargs = {'f': 'x'} if kwargs is None else kwargs
on_ack = on_ack or Mock(name='on_ack')
on_reject = on_reject or Mock(name='on_reject')
message = self.TaskMessage(
name or self.mytask.name, id, args=args, kwargs=kwargs, **head
)
return Request(message, app=self.app,
on_ack=on_ack, on_reject=on_reject)
class test_mro_lookup:
def test_order(self):
class A(object):
pass
class B(A):
pass
class C(B):
pass
class D(C):
@classmethod
def mro(cls):
return ()
A.x = 10
assert mro_lookup(C, 'x') == A
assert mro_lookup(C, 'x', stop={A}) is None
B.x = 10
assert mro_lookup(C, 'x') == B
C.x = 10
assert mro_lookup(C, 'x') == C
assert mro_lookup(D, 'x') is None
def jail(app, task_id, name, args, kwargs):
request = {'id': task_id}
task = app.tasks[name]
task.__trace__ = None # rebuild
return trace_task(
task, task_id, args, kwargs, request=request, eager=False, app=app,
).retval
@pytest.mark.skipif(sys.version_info[0] > 3, reason='Py2 only')
class test_default_encode:
def test_jython(self):
prev, sys.platform = sys.platform, 'java 1.6.1'
try:
assert default_encode(b'foo') == b'foo'
finally:
sys.platform = prev
def test_cpython(self):
prev, sys.platform = sys.platform, 'darwin'
gfe, sys.getfilesystemencoding = (
sys.getfilesystemencoding,
lambda: 'utf-8',
)
try:
assert default_encode(b'foo') == b'foo'
finally:
sys.platform = prev
sys.getfilesystemencoding = gfe
class test_Retry:
def test_retry_semipredicate(self):
try:
raise Exception('foo')
except Exception as exc:
ret = Retry('Retrying task', exc)
assert ret.exc == exc
class test_trace_task(RequestCase):
def test_process_cleanup_fails(self, patching):
_logger = patching('celery.app.trace.logger')
self.mytask.backend = Mock()
self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
tid = uuid()
ret = jail(self.app, tid, self.mytask.name, [2], {})
assert ret == 4
self.mytask.backend.mark_as_done.assert_called()
assert 'Process cleanup failed' in _logger.error.call_args[0][0]
def test_process_cleanup_BaseException(self):
self.mytask.backend = Mock()
self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit())
with pytest.raises(SystemExit):
jail(self.app, uuid(), self.mytask.name, [2], {})
def test_execute_jail_success(self):
ret = jail(self.app, uuid(), self.mytask.name, [2], {})
assert ret == 4
def test_marked_as_started(self):
_started = []
def store_result(tid, meta, state, **kwargs):
if state == states.STARTED:
_started.append(tid)
self.mytask.backend.store_result = Mock(name='store_result')
self.mytask.backend.store_result.side_effect = store_result
self.mytask.track_started = True
tid = uuid()
jail(self.app, tid, self.mytask.name, [2], {})
assert tid in _started
self.mytask.ignore_result = True
tid = uuid()
jail(self.app, tid, self.mytask.name, [2], {})
assert tid not in _started
def test_execute_jail_failure(self):
ret = jail(
self.app, uuid(), self.mytask_raising.name, [4], {},
)
assert isinstance(ret, ExceptionInfo)
assert ret.exception.args == (4,)
def test_execute_ignore_result(self):
@self.app.task(shared=False, ignore_result=True)
def ignores_result(i):
return i ** i
task_id = uuid()
ret = jail(self.app, task_id, ignores_result.name, [4], {})
assert ret == 256
assert not self.app.AsyncResult(task_id).ready()
class test_Request(RequestCase):
def get_request(self, sig, Request=Request, **kwargs):
return Request(
self.task_message_from_sig(self.app, sig),
on_ack=Mock(name='on_ack'),
on_reject=Mock(name='on_reject'),
eventer=Mock(name='eventer'),
app=self.app,
connection_errors=(socket.error,),
task=sig.type,
**kwargs
)
def test_shadow(self):
assert self.get_request(
self.add.s(2, 2).set(shadow='fooxyz')).name == 'fooxyz'
def test_invalid_eta_raises_InvalidTaskError(self):
with pytest.raises(InvalidTaskError):
self.get_request(self.add.s(2, 2).set(eta='12345'))
def test_invalid_expires_raises_InvalidTaskError(self):
with pytest.raises(InvalidTaskError):
self.get_request(self.add.s(2, 2).set(expires='12345'))
def test_valid_expires_with_utc_makes_aware(self):
with patch('celery.worker.request.maybe_make_aware') as mma:
self.get_request(self.add.s(2, 2).set(expires=10),
maybe_make_aware=mma)
mma.assert_called()
def test_maybe_expire_when_expires_is_None(self):
req = self.get_request(self.add.s(2, 2))
assert not req.maybe_expire()
def test_on_retry_acks_if_late(self):
self.add.acks_late = True
req = self.get_request(self.add.s(2, 2))
req.on_retry(Mock())
req.on_ack.assert_called_with(req_logger, req.connection_errors)
    def test_on_failure_Terminated(self):
einfo = None
try:
raise Terminated('9')
except Terminated:
einfo = ExceptionInfo()
assert einfo is not None
req = self.get_request(self.add.s(2, 2))
req.on_failure(einfo)
req.eventer.send.assert_called_with(
'task-revoked',
uuid=req.id, terminated=True, signum='9', expired=False,
)
def test_on_failure_propagates_MemoryError(self):
einfo = None
try:
raise MemoryError()
except MemoryError:
einfo = ExceptionInfo(internal=True)
assert einfo is not None
req = self.get_request(self.add.s(2, 2))
with pytest.raises(MemoryError):
req.on_failure(einfo)
def test_on_failure_Ignore_acknowledges(self):
einfo = None
try:
raise Ignore()
except Ignore:
einfo = ExceptionInfo(internal=True)
assert einfo is not None
req = self.get_request(self.add.s(2, 2))
req.on_failure(einfo)
req.on_ack.assert_called_with(req_logger, req.connection_errors)
def test_on_failure_Reject_rejects(self):
einfo = None
try:
raise Reject()
except Reject:
einfo = ExceptionInfo(internal=True)
assert einfo is not None
req = self.get_request(self.add.s(2, 2))
req.on_failure(einfo)
req.on_reject.assert_called_with(
req_logger, req.connection_errors, False,
)
def test_on_failure_Reject_rejects_with_requeue(self):
einfo = None
try:
raise Reject(requeue=True)
except Reject:
einfo = ExceptionInfo(internal=True)
assert einfo is not None
req = self.get_request(self.add.s(2, 2))
req.on_failure(einfo)
req.on_reject.assert_called_with(
req_logger, req.connection_errors, True,
)
def test_on_failure_WorkerLostError_rejects_with_requeue(self):
einfo = None
try:
raise WorkerLostError()
        except WorkerLostError:
einfo = ExceptionInfo(internal=True)
req = self.get_request(self.add.s(2, 2))
req.task.acks_late = True
req.task.reject_on_worker_lost = True
req.delivery_info['redelivered'] = False
req.on_failure(einfo)
req.on_reject.assert_called_with(
req_logger, req.connection_errors, True)
def test_on_failure_WorkerLostError_redelivered_None(self):
einfo = None
try:
raise WorkerLostError()
        except WorkerLostError:
einfo = ExceptionInfo(internal=True)
req = self.get_request(self.add.s(2, 2))
req.task.acks_late = True
req.task.reject_on_worker_lost = True
req.delivery_info['redelivered'] = None
req.on_failure(einfo)
req.on_reject.assert_called_with(
req_logger, req.connection_errors, True)
def test_tzlocal_is_cached(self):
req = self.get_request(self.add.s(2, 2))
req._tzlocal = 'foo'
assert req.tzlocal == 'foo'
def test_task_wrapper_repr(self):
assert repr(self.xRequest())
def test_sets_store_errors(self):
self.mytask.ignore_result = True
job = self.xRequest()
assert not job.store_errors
self.mytask.store_errors_even_if_ignored = True
job = self.xRequest()
assert job.store_errors
def test_send_event(self):
job = self.xRequest()
job.eventer = Mock(name='.eventer')
job.send_event('task-frobulated')
job.eventer.send.assert_called_with('task-frobulated', uuid=job.id)
def test_send_events__disabled_at_task_level(self):
job = self.xRequest()
job.task.send_events = False
job.eventer = Mock(name='.eventer')
job.send_event('task-frobulated')
job.eventer.send.assert_not_called()
def test_on_retry(self):
job = self.get_request(self.mytask.s(1, f='x'))
job.eventer = Mock(name='.eventer')
try:
raise Retry('foo', KeyError('moofoobar'))
        except Retry:
einfo = ExceptionInfo()
job.on_failure(einfo)
job.eventer.send.assert_called_with(
'task-retried',
uuid=job.id,
exception=safe_repr(einfo.exception.exc),
traceback=safe_str(einfo.traceback),
)
prev, module._does_info = module._does_info, False
try:
job.on_failure(einfo)
finally:
module._does_info = prev
einfo.internal = True
job.on_failure(einfo)
def test_compat_properties(self):
job = self.xRequest()
assert job.task_id == job.id
assert job.task_name == job.name
job.task_id = 'ID'
assert job.id == 'ID'
job.task_name = 'NAME'
assert job.name == 'NAME'
def test_terminate__pool_ref(self):
pool = Mock()
signum = signal.SIGTERM
job = self.get_request(self.mytask.s(1, f='x'))
job._apply_result = Mock(name='_apply_result')
with self.assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=True, expired=False, signum=signum):
job.time_start = monotonic()
job.worker_pid = 314
job.terminate(pool, signal='TERM')
job._apply_result().terminate.assert_called_with(signum)
job._apply_result = Mock(name='_apply_result2')
job._apply_result.return_value = None
job.terminate(pool, signal='TERM')
def test_terminate__task_started(self):
pool = Mock()
signum = signal.SIGTERM
job = self.get_request(self.mytask.s(1, f='x'))
with self.assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=True, expired=False, signum=signum):
job.time_start = monotonic()
job.worker_pid = 313
job.terminate(pool, signal='TERM')
pool.terminate_job.assert_called_with(job.worker_pid, signum)
def test_terminate__task_reserved(self):
pool = Mock()
job = self.get_request(self.mytask.s(1, f='x'))
job.time_start = None
job.terminate(pool, signal='TERM')
pool.terminate_job.assert_not_called()
assert job._terminate_on_ack == (pool, 15)
job.terminate(pool, signal='TERM')
def test_revoked_expires_expired(self):
job = self.get_request(self.mytask.s(1, f='x').set(
expires=datetime.utcnow() - timedelta(days=1)
))
with self.assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=False, expired=True, signum=None):
job.revoked()
assert job.id in revoked
assert self.mytask.backend.get_status(job.id) == states.REVOKED
def test_revoked_expires_not_expired(self):
job = self.xRequest(
expires=datetime.utcnow() + timedelta(days=1),
)
job.revoked()
assert job.id not in revoked
assert self.mytask.backend.get_status(job.id) != states.REVOKED
def test_revoked_expires_ignore_result(self):
self.mytask.ignore_result = True
job = self.xRequest(
expires=datetime.utcnow() - timedelta(days=1),
)
job.revoked()
assert job.id in revoked
assert self.mytask.backend.get_status(job.id) != states.REVOKED
def test_already_revoked(self):
job = self.xRequest()
job._already_revoked = True
assert job.revoked()
def test_revoked(self):
job = self.xRequest()
with self.assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=False, expired=False, signum=None):
revoked.add(job.id)
assert job.revoked()
assert job._already_revoked
assert job.acknowledged
def test_execute_does_not_execute_revoked(self):
job = self.xRequest()
revoked.add(job.id)
job.execute()
def test_execute_acks_late(self):
self.mytask_raising.acks_late = True
job = self.xRequest(
name=self.mytask_raising.name,
kwargs={},
)
job.execute()
assert job.acknowledged
job.execute()
def test_execute_using_pool_does_not_execute_revoked(self):
job = self.xRequest()
revoked.add(job.id)
with pytest.raises(TaskRevokedError):
job.execute_using_pool(None)
def test_on_accepted_acks_early(self):
job = self.xRequest()
job.on_accepted(pid=os.getpid(), time_accepted=monotonic())
assert job.acknowledged
prev, module._does_debug = module._does_debug, False
try:
job.on_accepted(pid=os.getpid(), time_accepted=monotonic())
finally:
module._does_debug = prev
def test_on_accepted_acks_late(self):
job = self.xRequest()
self.mytask.acks_late = True
job.on_accepted(pid=os.getpid(), time_accepted=monotonic())
assert not job.acknowledged
def test_on_accepted_terminates(self):
signum = signal.SIGTERM
pool = Mock()
job = self.xRequest()
with self.assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=True, expired=False, signum=signum):
job.terminate(pool, signal='TERM')
assert not pool.terminate_job.call_count
job.on_accepted(pid=314, time_accepted=monotonic())
pool.terminate_job.assert_called_with(314, signum)
def test_on_accepted_time_start(self):
job = self.xRequest()
job.on_accepted(pid=os.getpid(), time_accepted=monotonic())
assert time() - job.time_start < 1
def test_on_success_acks_early(self):
job = self.xRequest()
job.time_start = 1
job.on_success((0, 42, 0.001))
prev, module._does_info = module._does_info, False
try:
job.on_success((0, 42, 0.001))
assert not job.acknowledged
finally:
module._does_info = prev
def test_on_success_BaseException(self):
job = self.xRequest()
job.time_start = 1
with pytest.raises(SystemExit):
try:
raise SystemExit()
except SystemExit:
job.on_success((1, ExceptionInfo(), 0.01))
else:
assert False
def test_on_success_eventer(self):
job = self.xRequest()
job.time_start = 1
job.eventer = Mock()
job.eventer.send = Mock()
job.on_success((0, 42, 0.001))
job.eventer.send.assert_called()
def test_on_success_when_failure(self):
job = self.xRequest()
job.time_start = 1
job.on_failure = Mock()
try:
raise KeyError('foo')
except Exception:
job.on_success((1, ExceptionInfo(), 0.001))
job.on_failure.assert_called()
def test_on_success_acks_late(self):
job = self.xRequest()
job.time_start = 1
self.mytask.acks_late = True
job.on_success((0, 42, 0.001))
assert job.acknowledged
def test_on_failure_WorkerLostError(self):
def get_ei():
try:
raise WorkerLostError('do re mi')
except WorkerLostError:
return ExceptionInfo()
job = self.xRequest()
exc_info = get_ei()
job.on_failure(exc_info)
assert self.mytask.backend.get_status(job.id) == states.FAILURE
self.mytask.ignore_result = True
exc_info = get_ei()
job = self.xRequest()
job.on_failure(exc_info)
assert self.mytask.backend.get_status(job.id) == states.PENDING
def test_on_failure_acks_late(self):
job = self.xRequest()
job.time_start = 1
self.mytask.acks_late = True
try:
raise KeyError('foo')
except KeyError:
exc_info = ExceptionInfo()
job.on_failure(exc_info)
assert job.acknowledged
def test_from_message_invalid_kwargs(self):
m = self.TaskMessage(self.mytask.name, args=(), kwargs='foo')
req = Request(m, app=self.app)
with pytest.raises(InvalidTaskError):
raise req.execute().exception
def test_on_hard_timeout(self, patching):
error = patching('celery.worker.request.error')
job = self.xRequest()
job.acknowledge = Mock(name='ack')
job.task.acks_late = True
job.on_timeout(soft=False, timeout=1337)
assert 'Hard time limit' in error.call_args[0][0]
assert self.mytask.backend.get_status(job.id) == states.FAILURE
job.acknowledge.assert_called_with()
job = self.xRequest()
job.acknowledge = Mock(name='ack')
job.task.acks_late = False
job.on_timeout(soft=False, timeout=1335)
job.acknowledge.assert_not_called()
def test_on_soft_timeout(self, patching):
warn = patching('celery.worker.request.warn')
job = self.xRequest()
job.acknowledge = Mock(name='ack')
job.task.acks_late = True
job.on_timeout(soft=True, timeout=1337)
assert 'Soft time limit' in warn.call_args[0][0]
assert self.mytask.backend.get_status(job.id) == states.PENDING
job.acknowledge.assert_not_called()
self.mytask.ignore_result = True
job = self.xRequest()
job.on_timeout(soft=True, timeout=1336)
assert self.mytask.backend.get_status(job.id) == states.PENDING
def test_fast_trace_task(self):
from celery.app import trace
setup_worker_optimizations(self.app)
assert trace.trace_task_ret is trace._fast_trace_task
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
assert len(message.payload) == 3
try:
self.mytask.__trace__ = build_tracer(
self.mytask.name, self.mytask, self.app.loader, 'test',
app=self.app,
)
failed, res, runtime = trace.trace_task_ret(
self.mytask.name, tid, message.headers, message.body,
message.content_type, message.content_encoding)
assert not failed
assert res == repr(4 ** 4)
assert runtime is not None
assert isinstance(runtime, numbers.Real)
finally:
reset_worker_optimizations()
assert trace.trace_task_ret is trace._trace_task_ret
delattr(self.mytask, '__trace__')
failed, res, runtime = trace.trace_task_ret(
self.mytask.name, tid, message.headers, message.body,
message.content_type, message.content_encoding, app=self.app,
)
assert not failed
assert res == repr(4 ** 4)
assert runtime is not None
assert isinstance(runtime, numbers.Real)
def test_trace_task_ret(self):
self.mytask.__trace__ = build_tracer(
self.mytask.name, self.mytask, self.app.loader, 'test',
app=self.app,
)
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
_, R, _ = _trace_task_ret(
self.mytask.name, tid, message.headers,
message.body, message.content_type,
message.content_encoding, app=self.app,
)
assert R == repr(4 ** 4)
def test_trace_task_ret__no_trace(self):
try:
delattr(self.mytask, '__trace__')
except AttributeError:
pass
tid = uuid()
message = self.TaskMessage(self.mytask.name, tid, args=[4])
_, R, _ = _trace_task_ret(
self.mytask.name, tid, message.headers,
message.body, message.content_type,
message.content_encoding, app=self.app,
)
assert R == repr(4 ** 4)
def test_trace_catches_exception(self):
@self.app.task(request=None, shared=False)
def raising():
raise KeyError('baz')
with pytest.warns(RuntimeWarning):
res = trace_task(raising, uuid(), [], {}, app=self.app)[0]
assert isinstance(res, ExceptionInfo)
def test_worker_task_trace_handle_retry(self):
tid = uuid()
self.mytask.push_request(id=tid)
try:
raise ValueError('foo')
except Exception as exc:
try:
raise Retry(str(exc), exc=exc)
except Retry as exc:
w = TraceInfo(states.RETRY, exc)
w.handle_retry(
self.mytask, self.mytask.request, store_errors=False,
)
assert self.mytask.backend.get_status(tid) == states.PENDING
w.handle_retry(
self.mytask, self.mytask.request, store_errors=True,
)
assert self.mytask.backend.get_status(tid) == states.RETRY
finally:
self.mytask.pop_request()
def test_worker_task_trace_handle_failure(self):
tid = uuid()
self.mytask.push_request()
try:
self.mytask.request.id = tid
try:
raise ValueError('foo')
except Exception as exc:
w = TraceInfo(states.FAILURE, exc)
w.handle_failure(
self.mytask, self.mytask.request, store_errors=False,
)
assert self.mytask.backend.get_status(tid) == states.PENDING
w.handle_failure(
self.mytask, self.mytask.request, store_errors=True,
)
assert self.mytask.backend.get_status(tid) == states.FAILURE
finally:
self.mytask.pop_request()
def test_from_message(self):
us = 'æØåveéðƒeæ'
tid = uuid()
m = self.TaskMessage(
self.mytask.name, tid, args=[2], kwargs={us: 'bar'},
)
job = Request(m, app=self.app)
assert isinstance(job, Request)
assert job.name == self.mytask.name
assert job.id == tid
assert job.message is m
def test_from_message_empty_args(self):
tid = uuid()
m = self.TaskMessage(self.mytask.name, tid, args=[], kwargs={})
job = Request(m, app=self.app)
assert isinstance(job, Request)
def test_from_message_missing_required_fields(self):
m = self.TaskMessage(self.mytask.name)
m.headers.clear()
with pytest.raises(KeyError):
Request(m, app=self.app)
def test_from_message_nonexistant_task(self):
m = self.TaskMessage(
'cu.mytask.doesnotexist',
args=[2], kwargs={'æØåveéðƒeæ': 'bar'},
)
with pytest.raises(KeyError):
Request(m, app=self.app)
def test_execute(self):
tid = uuid()
job = self.xRequest(id=tid, args=[4], kwargs={})
assert job.execute() == 256
meta = self.mytask.backend.get_task_meta(tid)
assert meta['status'] == states.SUCCESS
assert meta['result'] == 256
def test_execute_success_no_kwargs(self):
@self.app.task # traverses coverage for decorator without parens
def mytask_no_kwargs(i):
return i ** i
tid = uuid()
job = self.xRequest(
name=mytask_no_kwargs.name,
id=tid,
args=[4],
kwargs={},
)
assert job.execute() == 256
meta = mytask_no_kwargs.backend.get_task_meta(tid)
assert meta['result'] == 256
assert meta['status'] == states.SUCCESS
def test_execute_ack(self):
scratch = {'ACK': False}
def on_ack(*args, **kwargs):
scratch['ACK'] = True
tid = uuid()
job = self.xRequest(id=tid, args=[4], on_ack=on_ack)
assert job.execute() == 256
meta = self.mytask.backend.get_task_meta(tid)
assert scratch['ACK']
assert meta['result'] == 256
assert meta['status'] == states.SUCCESS
def test_execute_fail(self):
tid = uuid()
job = self.xRequest(
name=self.mytask_raising.name,
id=tid,
args=[4],
kwargs={},
)
assert isinstance(job.execute(), ExceptionInfo)
assert self.mytask_raising.backend.serializer == 'pickle'
meta = self.mytask_raising.backend.get_task_meta(tid)
assert meta['status'] == states.FAILURE
assert isinstance(meta['result'], KeyError)
def test_execute_using_pool(self):
tid = uuid()
job = self.xRequest(id=tid, args=[4])
p = Mock()
job.execute_using_pool(p)
p.apply_async.assert_called_once()
args = p.apply_async.call_args[1]['args']
assert args[0] == self.mytask.name
assert args[1] == tid
assert args[2] == job.request_dict
assert args[3] == job.message.body
def _test_on_failure(self, exception, **kwargs):
tid = uuid()
job = self.xRequest(id=tid, args=[4])
job.send_event = Mock(name='send_event')
job.task.backend.mark_as_failure = Mock(name='mark_as_failure')
try:
raise exception
except type(exception):
exc_info = ExceptionInfo()
job.on_failure(exc_info, **kwargs)
job.send_event.assert_called()
return job
def test_on_failure(self):
self._test_on_failure(Exception('Inside unit tests'))
def test_on_failure__unicode_exception(self):
self._test_on_failure(Exception('Бобры атакуют'))
def test_on_failure__utf8_exception(self):
self._test_on_failure(Exception(
from_utf8('Бобры атакуют')))
def test_on_failure__WorkerLostError(self):
exc = WorkerLostError()
job = self._test_on_failure(exc)
job.task.backend.mark_as_failure.assert_called_with(
job.id, exc, request=job, store_result=True,
)
def test_on_failure__return_ok(self):
self._test_on_failure(KeyError(), return_ok=True)
def test_reject(self):
job = self.xRequest(id=uuid())
job.on_reject = Mock(name='on_reject')
job.reject(requeue=True)
job.on_reject.assert_called_with(
req_logger, job.connection_errors, True,
)
assert job.acknowledged
job.on_reject.reset_mock()
job.reject(requeue=True)
job.on_reject.assert_not_called()
def test_group(self):
gid = uuid()
job = self.xRequest(id=uuid(), group=gid)
assert job.group == gid
class test_create_request_class(RequestCase):
def setup(self):
self.task = Mock(name='task')
self.pool = Mock(name='pool')
self.eventer = Mock(name='eventer')
RequestCase.setup(self)
def create_request_cls(self, **kwargs):
return create_request_cls(
Request, self.task, self.pool, 'foo', self.eventer, **kwargs
)
def zRequest(self, Request=None, revoked_tasks=None, ref=None, **kwargs):
return self.xRequest(
Request=Request or self.create_request_cls(
ref=ref,
revoked_tasks=revoked_tasks,
),
**kwargs)
def test_on_success(self):
self.zRequest(id=uuid()).on_success((False, 'hey', 3.1222))
def test_on_success__SystemExit(self,
errors=(SystemExit, KeyboardInterrupt)):
for exc in errors:
einfo = None
try:
raise exc()
except exc:
einfo = ExceptionInfo()
with pytest.raises(exc):
self.zRequest(id=uuid()).on_success((True, einfo, 1.0))
def test_on_success__calls_failure(self):
job = self.zRequest(id=uuid())
einfo = Mock(name='einfo')
job.on_failure = Mock(name='on_failure')
job.on_success((True, einfo, 1.0))
job.on_failure.assert_called_with(einfo, return_ok=True)
def test_on_success__acks_late_enabled(self):
self.task.acks_late = True
job = self.zRequest(id=uuid())
job.acknowledge = Mock(name='ack')
job.on_success((False, 'foo', 1.0))
job.acknowledge.assert_called_with()
def test_on_success__acks_late_disabled(self):
self.task.acks_late = False
job = self.zRequest(id=uuid())
job.acknowledge = Mock(name='ack')
job.on_success((False, 'foo', 1.0))
job.acknowledge.assert_not_called()
def test_on_success__no_events(self):
self.eventer = None
job = self.zRequest(id=uuid())
job.send_event = Mock(name='send_event')
job.on_success((False, 'foo', 1.0))
job.send_event.assert_not_called()
def test_on_success__with_events(self):
job = self.zRequest(id=uuid())
job.send_event = Mock(name='send_event')
job.on_success((False, 'foo', 1.0))
job.send_event.assert_called_with(
'task-succeeded', result='foo', runtime=1.0,
)
def test_execute_using_pool__revoked(self):
tid = uuid()
job = self.zRequest(id=tid, revoked_tasks={tid})
job.revoked = Mock()
job.revoked.return_value = True
with pytest.raises(TaskRevokedError):
job.execute_using_pool(self.pool)
def test_execute_using_pool__expired(self):
tid = uuid()
job = self.zRequest(id=tid, revoked_tasks=set())
job.expires = 1232133
job.revoked = Mock()
job.revoked.return_value = True
with pytest.raises(TaskRevokedError):
job.execute_using_pool(self.pool)
def test_execute_using_pool(self):
from celery.app.trace import trace_task_ret as trace
weakref_ref = Mock(name='weakref.ref')
job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref)
job.execute_using_pool(self.pool)
self.pool.apply_async.assert_called_with(
trace,
args=(job.type, job.id, job.request_dict, job.body,
job.content_type, job.content_encoding),
accept_callback=job.on_accepted,
timeout_callback=job.on_timeout,
callback=job.on_success,
error_callback=job.on_failure,
soft_timeout=self.task.soft_time_limit,
timeout=self.task.time_limit,
correlation_id=job.id,
)
assert job._apply_result
weakref_ref.assert_called_with(self.pool.apply_async())
assert job._apply_result is weakref_ref()
def test_execute_using_pool_with_none_timelimit_header(self):
from celery.app.trace import trace_task_ret as trace
weakref_ref = Mock(name='weakref.ref')
job = self.zRequest(id=uuid(),
revoked_tasks=set(),
ref=weakref_ref,
headers={'timelimit': None})
job.execute_using_pool(self.pool)
self.pool.apply_async.assert_called_with(
trace,
args=(job.type, job.id, job.request_dict, job.body,
job.content_type, job.content_encoding),
accept_callback=job.on_accepted,
timeout_callback=job.on_timeout,
callback=job.on_success,
error_callback=job.on_failure,
soft_timeout=self.task.soft_time_limit,
timeout=self.task.time_limit,
correlation_id=job.id,
)
assert job._apply_result
weakref_ref.assert_called_with(self.pool.apply_async())
assert job._apply_result is weakref_ref()
def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
weakref_ref = Mock(name='weakref.ref')
headers = strategy.hybrid_to_proto2('', {'id': uuid(),
'task': self.mytask.name})[1]
job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
job.execute_using_pool(self.pool)
assert job._apply_result
weakref_ref.assert_called_with(self.pool.apply_async())
assert job._apply_result is weakref_ref()
| apache-2.0 |
to266/hyperspy | hyperspy/models/model1D.py | 1 | 32769 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import numpy as np
from contextlib import contextmanager
from hyperspy.model import BaseModel, ModelComponents, ModelSpecialSlicers
import hyperspy.drawing.spectrum
from hyperspy.drawing.utils import on_figure_window_close
from hyperspy._signals.eels import Spectrum
from hyperspy.axes import generate_axis
from hyperspy.exceptions import WrongObjectError
from hyperspy.decorators import interactive_range_selector
from hyperspy.drawing.widgets import VerticalLineWidget, LabelWidget
from hyperspy.gui.tools import ComponentFit
from hyperspy.events import EventSupressor
class Model1D(BaseModel):
"""Model and data fitting for one dimensional signals.
A model is constructed as a linear combination of :mod:`components` that
are added to the model using :meth:`append` or :meth:`extend`. There
are many predifined components available in the in the :mod:`components`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:meth:`multifit`. Once the optimizer reaches the convergence criteria or
the maximum number of iterations the new value of the component parameters
are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Attributes
----------
spectrum : Spectrum instance
It contains the data to fit.
chisq : A Signal of floats
Chi-squared of the signal (or np.nan if not yet fit)
dof : A Signal of integers
Degrees of freedom of the signal (0 if not yet fit)
red_chisq : Signal instance
Reduced chi-squared.
components : `ModelComponents` instance
The components of the model are attributes of this class. This provides
    a convenient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
append
Append one component to the model.
extend
Append multiple components to the model.
remove
Remove component from model.
as_signal
        Generate a Spectrum instance (possibly multidimensional)
from the model.
store_current_values
Store the value of the parameters at the current position.
fetch_stored_values
        Fetch stored values of the parameters.
update_plot
Force a plot update. (In most cases the plot should update
automatically.)
    set_signal_range, remove_signal_range, reset_signal_range,
    add_signal_range
Customize the signal range to fit.
fit, multifit
Fit the model to the data at the current position or the
full dataset.
save_parameters2file, load_parameters_from_file
Save/load the parameter values to/from a file.
plot
Plot the model and the data.
enable_plot_components, disable_plot_components
Plot each component separately. (Use after `plot`.)
set_current_values_to
Set the current value of all the parameters of the given component as
the value for all the dataset.
export_results
Save the value of the parameters in separate files.
plot_results
Plot the value of all parameters at all positions.
print_current_values
Print the value of the parameters at the current position.
enable_adjust_position, disable_adjust_position
Enable/disable interactive adjustment of the position of the components
        that have a well-defined position. (Use after `plot`.)
fit_component
Fit just the given component in the given signal range, that can be
set interactively.
set_parameters_not_free, set_parameters_free
        Set the `free` status of several components and parameters at once.
set_parameters_value
Set the value of a parameter in components in a model to a specified
value.
as_dictionary
Exports the model to a dictionary that can be saved in a file.
Examples
--------
In the following example we create a histogram from a normal distribution
and fit it with a gaussian component. It demonstrates how to create
a model from a :class:`~._signals.spectrum.Spectrum` instance, add
components to it, adjust the value of the parameters of the components,
fit the model to the data and access the components in the model.
    >>> s = hs.signals.Spectrum(
    ...     np.random.normal(scale=2, size=10000)).get_histogram()
>>> g = hs.model.components.Gaussian()
>>> m = s.create_model()
>>> m.append(g)
>>> m.print_current_values()
Components Parameter Value
Gaussian
sigma 1.000000
A 1.000000
centre 0.000000
>>> g.centre.value = 3
>>> m.print_current_values()
Components Parameter Value
Gaussian
sigma 1.000000
A 1.000000
centre 3.000000
>>> g.sigma.value
1.0
>>> m.fit()
>>> g.sigma.value
1.9779042300856682
>>> m[0].sigma.value
1.9779042300856682
>>> m["Gaussian"].centre.value
-0.072121936813224569
"""
def __init__(self, spectrum, dictionary=None):
self.signal = spectrum
self.axes_manager = self.signal.axes_manager
self._plot = None
self._position_widgets = {}
self._adjust_position_all = None
self._plot_components = False
self._suspend_update = False
self._model_line = None
self._adjust_position_all = None
self.axis = self.axes_manager.signal_axes[0]
self.axes_manager.events.indices_changed.connect(
self.fetch_stored_values, [])
self.channel_switches = np.array([True] * len(self.axis.axis))
self.chisq = spectrum._get_navigation_signal()
self.chisq.change_dtype("float")
self.chisq.data.fill(np.nan)
self.chisq.metadata.General.title = (
self.signal.metadata.General.title + ' chi-squared')
self.dof = self.chisq._deepcopy_with_new_data(
np.zeros_like(self.chisq.data, dtype='int'))
self.dof.metadata.General.title = (
self.signal.metadata.General.title + ' degrees of freedom')
self.free_parameters_boundaries = None
self._low_loss = None
self.convolved = False
self.components = ModelComponents(self)
if dictionary is not None:
self._load_dictionary(dictionary)
self.inav = ModelSpecialSlicers(self, True)
self.isig = ModelSpecialSlicers(self, False)
self._whitelist = {
'channel_switches': None,
'convolved': None,
'free_parameters_boundaries': None,
'low_loss': ('sig', None),
'chisq.data': None,
'dof.data': None}
self._slicing_whitelist = {
'channel_switches': 'isig',
'low_loss': 'inav',
'chisq.data': 'inav',
'dof.data': 'inav'}
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, value):
if isinstance(value, Spectrum):
self._signal = value
else:
raise WrongObjectError(str(type(value)), 'Spectrum')
@property
def low_loss(self):
return self._low_loss
@low_loss.setter
def low_loss(self, value):
if value is not None:
if (value.axes_manager.navigation_shape !=
self.signal.axes_manager.navigation_shape):
raise ValueError('The low-loss does not have '
'the same navigation dimension as the '
'core-loss')
self._low_loss = value
self.set_convolution_axis()
self.convolved = True
else:
self._low_loss = value
self.convolution_axis = None
self.convolved = False
# Extend the list methods to call the _touch when the model is modified
def set_convolution_axis(self):
"""
Creates an axis to use to generate the data of the model in the precise
        scale to obtain the correct axis and origin after convolution with the
        low-loss spectrum.
"""
ll_axis = self.low_loss.axes_manager.signal_axes[0]
dimension = self.axis.size + ll_axis.size - 1
step = self.axis.scale
knot_position = ll_axis.size - ll_axis.value2index(0) - 1
self.convolution_axis = generate_axis(self.axis.offset, step,
dimension, knot_position)
def append(self, thing):
super(Model1D, self).append(thing)
if self._plot_components:
self._plot_component(thing)
if self._adjust_position_all:
self._make_position_adjuster(thing, self._adjust_position_all[0],
self._adjust_position_all[1])
def remove(self, things):
things = self._get_component(things)
if not np.iterable(things):
things = [things]
for thing in things:
parameter = thing._position
if parameter in self._position_widgets:
for pw in reversed(self._position_widgets[parameter]):
pw.close()
if hasattr(thing, '_model_plot_line'):
line = thing._model_plot_line
line.close()
super(Model1D, self).remove(things)
self._disconnect_parameters2update_plot(things)
remove.__doc__ = BaseModel.remove.__doc__
def _connect_parameters2update_plot(self, components):
if self._plot_active is False:
return
for i, component in enumerate(components):
component.events.active_changed.connect(
self._model_line.update, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(
self._model_line.update, [])
if self._plot_components is True:
self._connect_component_lines()
def _disconnect_parameters2update_plot(self, components):
if self._model_line is None:
return
for component in components:
component.events.active_changed.disconnect(self._model_line.update)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(
self._model_line.update)
if self._plot_components is True:
self._disconnect_component_lines()
def update_plot(self, *args, **kwargs):
"""Update model plot.
The updating can be suspended using `suspend_update`.
See Also
--------
suspend_update
"""
if self._plot_active is True and self._suspend_update is False:
try:
self._update_model_line()
for component in [component for component in self if
component.active is True]:
self._update_component_line(component)
            except Exception:
self._disconnect_parameters2update_plot(components=self)
@contextmanager
def suspend_update(self, update_on_resume=True):
"""Prevents plot from updating until 'with' clause completes.
See Also
--------
update_plot
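        Examples
        --------
        A minimal sketch (``m`` is a hypothetical plotted model; the plot
        refreshes once, when the block exits)::
            >>> with m.suspend_update():
            ...     for component in m:
            ...         component.active = True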
"""
es = EventSupressor()
es.add(self.axes_manager.events.indices_changed)
if self._model_line:
f = self._model_line.update
for c in self:
es.add(c.events, f)
for p in c.parameters:
es.add(p.events, f)
for c in self:
if hasattr(c, '_model_plot_line'):
f = c._model_plot_line.update
es.add(c.events, f)
for p in c.parameters:
es.add(p.events, f)
old = self._suspend_update
self._suspend_update = True
with es.suppress():
yield
self._suspend_update = old
if update_on_resume is True:
self.update_plot()
def _update_model_line(self):
if (self._plot_active is True and
self._model_line is not None):
self._model_line.update()
def __call__(self, non_convolved=False, onlyactive=False):
"""Returns the corresponding model for the current coordinates
Parameters
----------
        non_convolved : bool
            If True it will return the model without convolving it with the
            low-loss spectrum.
        onlyactive : bool
            If True, only the active components will be used to build the
            model.
Returns
-------
numpy array
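        Examples
        --------
        A minimal sketch (``m`` is a hypothetical model with components)::
            >>> y = m(onlyactive=True)  # evaluated at the current indices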
"""
if self.convolved is False or non_convolved is True:
axis = self.axis.axis[self.channel_switches]
sum_ = np.zeros(len(axis))
if onlyactive is True:
for component in self:
if component.active:
sum_ += component.function(axis)
else:
for component in self:
sum_ += component.function(axis)
to_return = sum_
else: # convolved
sum_convolved = np.zeros(len(self.convolution_axis))
sum_ = np.zeros(len(self.axis.axis))
for component in self: # Cut the parameters list
if onlyactive:
if component.active:
if component.convolved:
sum_convolved += component.function(
self.convolution_axis)
else:
sum_ += component.function(self.axis.axis)
else:
if component.convolved:
sum_convolved += component.function(
self.convolution_axis)
else:
sum_ += component.function(self.axis.axis)
to_return = sum_ + np.convolve(
self.low_loss(self.axes_manager),
sum_convolved, mode="valid")
to_return = to_return[self.channel_switches]
if self.signal.metadata.Signal.binned is True:
to_return *= self.signal.axes_manager[-1].scale
return to_return
def _errfunc(self, param, y, weights=None):
if weights is None:
weights = 1.
errfunc = self._model_function(param) - y
return errfunc * weights
def _set_signal_range_in_pixels(self, i1=None, i2=None):
"""Use only the selected spectral range in the fitting routine.
Parameters
----------
i1 : Int
i2 : Int
Notes
-----
To use the full energy range call the function without arguments.
"""
self.backup_channel_switches = copy.copy(self.channel_switches)
self.channel_switches[:] = False
self.channel_switches[i1:i2] = True
self.update_plot()
@interactive_range_selector
def set_signal_range(self, x1=None, x2=None):
"""Use only the selected spectral range defined in its own units in the
fitting routine.
Parameters
----------
        x1 : None or float
        x2 : None or float
Notes
-----
To use the full energy range call the function without arguments.
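        Examples
        --------
        A minimal sketch (the values are in the units of the signal axis;
        the numbers are arbitrary)::
            >>> m.set_signal_range(200., 560.)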
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._set_signal_range_in_pixels(i1, i2)
def _remove_signal_range_in_pixels(self, i1=None, i2=None):
"""Removes the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
self.channel_switches[i1:i2] = False
self.update_plot()
@interactive_range_selector
def remove_signal_range(self, x1=None, x2=None):
"""Removes the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._remove_signal_range_in_pixels(i1, i2)
def reset_signal_range(self):
"""Resets the data range"""
self._set_signal_range_in_pixels()
def _add_signal_range_in_pixels(self, i1=None, i2=None):
"""Adds the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
self.channel_switches[i1:i2] = True
self.update_plot()
@interactive_range_selector
def add_signal_range(self, x1=None, x2=None):
"""Adds the data in the given range from the data range that
will be used by the fitting rountine
Parameters
----------
x1 : None or float
x2 : None or float
"""
i1, i2 = self.axis.value_range_to_indices(x1, x2)
self._add_signal_range_in_pixels(i1, i2)
def reset_the_signal_range(self):
self.channel_switches[:] = True
self.update_plot()
def _jacobian(self, param, y, weights=None):
if weights is None:
weights = 1.
if self.convolved is True:
counter = 0
grad = np.zeros(len(self.axis.axis))
for component in self: # Cut the parameters list
if component.active:
component.fetch_values_from_array(
param[
counter:counter +
component._nfree_param],
onlyfree=True)
if component.convolved:
for parameter in component.free_parameters:
par_grad = np.convolve(
parameter.grad(self.convolution_axis),
self.low_loss(self.axes_manager),
mode="valid")
if parameter._twins:
for par in parameter._twins:
np.add(par_grad, np.convolve(
par.grad(
self.convolution_axis),
self.low_loss(self.axes_manager),
mode="valid"), par_grad)
grad = np.vstack((grad, par_grad))
else:
for parameter in component.free_parameters:
par_grad = parameter.grad(self.axis.axis)
if parameter._twins:
for par in parameter._twins:
np.add(par_grad, par.grad(
self.axis.axis), par_grad)
grad = np.vstack((grad, par_grad))
counter += component._nfree_param
to_return = grad[1:, self.channel_switches] * weights
else:
axis = self.axis.axis[self.channel_switches]
counter = 0
grad = axis
for component in self: # Cut the parameters list
if component.active:
component.fetch_values_from_array(
param[
counter:counter +
component._nfree_param],
onlyfree=True)
for parameter in component.free_parameters:
par_grad = parameter.grad(axis)
if parameter._twins:
for par in parameter._twins:
np.add(par_grad, par.grad(
axis), par_grad)
grad = np.vstack((grad, par_grad))
counter += component._nfree_param
to_return = grad[1:, :] * weights
if self.signal.metadata.Signal.binned is True:
to_return *= self.signal.axes_manager[-1].scale
return to_return
def _function4odr(self, param, x):
return self._model_function(param)
def _jacobian4odr(self, param, x):
return self._jacobian(param, x)
def _poisson_likelihood_function(self, param, y, weights=None):
"""Returns the likelihood function of the model for the given
data and parameters
"""
mf = self._model_function(param)
with np.errstate(invalid='ignore'):
return -(y * np.log(mf) - mf).sum()
def _gradient_ml(self, param, y, weights=None):
mf = self._model_function(param)
return -(self._jacobian(param, y) * (y / mf - 1)).sum(1)
def _gradient_ls(self, param, y, weights=None):
gls = (2 * self._errfunc(param, y, weights) *
self._jacobian(param, y)).sum(1)
return gls
def plot(self, plot_components=False):
"""Plots the current spectrum to the screen and a map with a
cursor to explore the SI.
Parameters
----------
plot_components : bool
If True, add a line per component to the signal figure.
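        Examples
        --------
        A minimal sketch::
            >>> m.plot(plot_components=True)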
"""
# If new coordinates are assigned
self.signal.plot()
_plot = self.signal._plot
l1 = _plot.signal_plot.ax_lines[0]
color = l1.line.get_color()
l1.set_line_properties(color=color, type='scatter')
l2 = hyperspy.drawing.spectrum.SpectrumLine()
l2.data_function = self._model2plot
l2.set_line_properties(color='blue', type='line')
# Add the line to the figure
_plot.signal_plot.add_line(l2)
l2.plot()
on_figure_window_close(_plot.signal_plot.figure,
self._close_plot)
self._model_line = l2
self._plot = self.signal._plot
self._connect_parameters2update_plot(self)
if plot_components is True:
self.enable_plot_components()
else:
# If we were plotted before, make sure we reset state here
self.disable_plot_components()
# If we were plotted before, make sure we reset state here
self.disable_adjust_position()
@staticmethod
def _connect_component_line(component):
if hasattr(component, "_model_plot_line"):
f = component._model_plot_line.update
component.events.active_changed.connect(f, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(f, [])
@staticmethod
def _disconnect_component_line(component):
if hasattr(component, "_model_plot_line"):
f = component._model_plot_line.update
component.events.active_changed.disconnect(f)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(f)
def _connect_component_lines(self):
for component in self:
if component.active:
self._connect_component_line(component)
def _disconnect_component_lines(self):
for component in self:
if component.active:
self._disconnect_component_line(component)
def _plot_component(self, component):
line = hyperspy.drawing.spectrum.SpectrumLine()
line.data_function = component._component2plot
# Add the line to the figure
self._plot.signal_plot.add_line(line)
line.plot()
component._model_plot_line = line
self._connect_component_line(component)
@staticmethod
def _update_component_line(component):
if hasattr(component, "_model_plot_line"):
component._model_plot_line.update()
def _disable_plot_component(self, component):
self._disconnect_component_line(component)
if hasattr(component, "_model_plot_line"):
component._model_plot_line.close()
del component._model_plot_line
self._plot_components = False
def _close_plot(self):
if self._plot_components is True:
self.disable_plot_components()
self._disconnect_parameters2update_plot(components=self)
self._model_line = None
def enable_plot_components(self):
if self._plot is None or self._plot_components:
return
self._plot_components = True
for component in [component for component in self if
component.active]:
self._plot_component(component)
def disable_plot_components(self):
if self._plot is None:
return
for component in self:
self._disable_plot_component(component)
self._plot_components = False
def enable_adjust_position(
self, components=None, fix_them=True, show_label=True):
"""Allow changing the *x* position of component by dragging
a vertical line that is plotted in the signal model figure
Parameters
----------
components : {None, list of components}
            If None, the position of all the active components of the
            model that have a well-defined *x* position with a value
            in the axis range will get a position adjustment line.
Otherwise the feature is added only to the given components.
The components can be specified by name, index or themselves.
fix_them : bool
            If True the position parameters of the components will be
            temporarily fixed until adjust position is disabled. This can
            be useful to iteratively adjust the component positions and
            fit the model.
show_label : bool, optional
If True, a label showing the component name is added to the
plot next to the vertical line.
See also
--------
disable_adjust_position
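        Examples
        --------
        A minimal sketch (requires an active plot)::
            >>> m.plot()
            >>> m.enable_adjust_position(fix_them=False, show_label=True)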
"""
if (self._plot is None or
self._plot.is_active() is False):
self.plot()
if self._position_widgets:
self.disable_adjust_position()
on_figure_window_close(self._plot.signal_plot.figure,
self.disable_adjust_position)
if components:
components = [self._get_component(x) for x in components]
else:
self._adjust_position_all = (fix_them, show_label)
components = components if components else self
if not components:
# The model does not have components so we do nothing
return
components = [
component for component in components if component.active]
for component in components:
self._make_position_adjuster(component, fix_them, show_label)
def _make_position_adjuster(self, component, fix_it, show_label):
if (component._position is None or component._position.twin):
return
axis = self.axes_manager.signal_axes[0]
# Create the vertical line and labels
widgets = [VerticalLineWidget(self.axes_manager)]
if show_label:
label = LabelWidget(self.axes_manager)
label.string = component._get_short_description().replace(
' component', '')
widgets.append(label)
self._position_widgets[component._position] = widgets
for w in widgets:
# Setup widget
w.axes = (axis,)
w.snap_position = False
w.position = (component._position.value,)
w.set_mpl_ax(self._plot.signal_plot.ax)
# Create widget -> parameter connection
w.events.moved.connect(self._on_widget_moved, {'obj': 'widget'})
# Create parameter -> widget connection
component._position.events.value_changed.connect(
w._set_position, dict(value='position'))
# Map relation for close event
w.events.closed.connect(self._on_position_widget_close,
{'obj': 'widget'})
def _reverse_lookup_position_widget(self, widget):
for parameter, widgets in self._position_widgets.items():
if widget in widgets:
return parameter
raise KeyError()
def _on_widget_moved(self, widget):
parameter = self._reverse_lookup_position_widget(widget)
es = EventSupressor()
for w in self._position_widgets[parameter]:
es.add((w.events.moved, w._set_position))
with es.suppress():
parameter.value = widget.position[0]
def _on_position_widget_close(self, widget):
widget.events.closed.disconnect(self._on_position_widget_close)
parameter = self._reverse_lookup_position_widget(widget)
self._position_widgets[parameter].remove(widget)
if len(self._position_widgets[parameter]) == 0:
self._position_widgets.pop(parameter)
parameter.events.value_changed.disconnect(widget._set_position)
widget.events.moved.disconnect(self._on_widget_moved)
def disable_adjust_position(self):
"""Disables the interactive adjust position feature
See also
--------
enable_adjust_position
"""
self._adjust_position_all = False
for pws in list(self._position_widgets.values()):
# Iteration works on a copied collection, so changes during
# iteration should be ok
for pw in reversed(pws): # pws is reference, so work in reverse
pw.close()
def fit_component(
self,
component,
signal_range="interactive",
estimate_parameters=True,
fit_independent=False,
only_current=True,
**kwargs):
"""Fit just the given component in the given signal range.
This method is useful to obtain starting parameters for the
components. Any keyword arguments are passed to the fit method.
Parameters
----------
component : component instance
The component must be in the model, otherwise an exception
is raised. The component can be specified by name, index or itself.
signal_range : {'interactive', (left_value, right_value), None}
If 'interactive' the signal range is selected using the span
selector on the spectrum plot. The signal range can also
be manually specified by passing a tuple of floats. If None
the current signal range is used.
estimate_parameters : bool, default True
If True will check if the component has an
estimate_parameters function, and use it to estimate the
parameters in the component.
fit_independent : bool, default False
If True, all other components are disabled. If False, all other
component parameters are fixed.
Examples
--------
Signal range set interactively
>>> g1 = hs.model.components.Gaussian()
>>> m.append(g1)
>>> m.fit_component(g1)
Signal range set through direct input
>>> m.fit_component(g1, signal_range=(50,100))
"""
component = self._get_component(component)
cf = ComponentFit(self, component, signal_range,
estimate_parameters, fit_independent,
only_current, **kwargs)
if signal_range == "interactive":
cf.edit_traits()
else:
cf.apply()
| gpl-3.0 |
DrAA/avro | lang/py/src/avro/tether/tether_task.py | 18 | 17523 | """
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
__all__=["TetherTask","TaskType","inputProtocol","outputProtocol","HTTPRequestor"]
from avro import schema, protocol
from avro import io as avio
from avro import ipc
import io as pyio
import sys
import os
import traceback
import logging
import collections
from StringIO import StringIO
import threading
# create protocol objects for the input and output protocols
# The build process should copy InputProtocol.avpr and OutputProtocol.avpr
# into the same directory as this module
inputProtocol=None
outputProtocol=None
TaskType=None
if (inputProtocol is None):
pfile=os.path.split(__file__)[0]+os.sep+"InputProtocol.avpr"
if not(os.path.exists(pfile)):
raise Exception("Could not locate the InputProtocol: {0} does not exist".format(pfile))
with file(pfile,'r') as hf:
prototxt=hf.read()
inputProtocol=protocol.parse(prototxt)
# use a named tuple to represent the tasktype enumeration
taskschema=inputProtocol.types_dict["TaskType"]
_ttype=collections.namedtuple("_tasktype",taskschema.symbols)
TaskType=_ttype(*taskschema.symbols)
if (outputProtocol is None):
pfile=os.path.split(__file__)[0]+os.sep+"OutputProtocol.avpr"
if not(os.path.exists(pfile)):
raise Exception("Could not locate the OutputProtocol: {0} does not exist".format(pfile))
with file(pfile,'r') as hf:
prototxt=hf.read()
outputProtocol=protocol.parse(prototxt)
class Collector(object):
"""
Collector for map and reduce output values
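A minimal usage sketch (``pair_schema_json`` and ``client`` are
illustrative placeholders for a schema JSON string and an output
client, respectively):
collector = Collector(scheme=pair_schema_json, outputClient=client)
collector.collect({"key": "word", "value": 1})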
"""
def __init__(self,scheme=None,outputClient=None):
"""
Parameters
---------------------------------------------
scheme - The scheme for the datums to output - can be a json string
- or an instance of Schema
outputClient - The output client used to send messages to the parent
"""
if not(isinstance(scheme,schema.Schema)):
scheme=schema.parse(scheme)
if (outputClient is None):
raise ValueError("output client can't be none.")
self.scheme=scheme
self.buff=StringIO()
self.encoder=avio.BinaryEncoder(self.buff)
self.datum_writer = avio.DatumWriter(writers_schema=self.scheme)
self.outputClient=outputClient
def collect(self,record,partition=None):
"""Collect a map or reduce output value
Parameters
------------------------------------------------------
record - The record to write
partition - Indicates the partition for a pre-partitioned map output
- currently not supported
"""
self.buff.truncate(0)
self.datum_writer.write(record, self.encoder);
self.buff.flush();
self.buff.seek(0)
# delete all the data in the buffer
if (partition is None):
# TODO: Is there a more efficient way to read the data in self.buff?
# we could use self.buff.read() but that returns the byte array as a string
# will that work? We can also use self.buff.readinto to read it into
# a bytearray but the byte array must be pre-allocated
# self.outputClient.output(self.buff.buffer.read())
# it's not a StringIO
self.outputClient.request("output",{"datum":self.buff.read()})
else:
self.outputClient.request("outputPartitioned",{"datum":self.buff.read(),"partition":partition})
def keys_are_equal(rec1,rec2,fkeys):
"""Check if the "keys" in two records are equal. The key fields
are all fields for which order isn't marked ignore.
Parameters
-------------------------------------------------------------------------
rec1 - The first record
rec2 - The second record
fkeys - A list of the fields to compare
"""
for f in fkeys:
if not(rec1[f]==rec2[f]):
return False
return True
class HTTPRequestor(object):
"""
This is a small requestor subclass I created for the HTTP protocol.
Since the HTTP protocol isn't persistent, we need to instantiate
a new transceiver and a new requestor for each request.
But I wanted use of the requestor to be identical to that for
SocketTransceiver so that we can seamlessly switch between the two.
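Example (host, port and message are illustrative; ``outputProtocol``
is the module-level protocol object defined above):
requestor = HTTPRequestor("127.0.0.1", 12345, outputProtocol)
requestor.request("status", {"message": "running"})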
"""
def __init__(self, server,port,protocol):
"""
Instantiate the class.
Parameters
----------------------------------------------------------------------
server - The server hostname
port - Which port to use
protocol - The protocol for the communication
"""
self.server=server
self.port=port
self.protocol=protocol
def request(self,*args,**param):
transciever=ipc.HTTPTransceiver(self.server,self.port)
requestor=ipc.Requestor(self.protocol, transciever)
return requestor.request(*args,**param)
class TetherTask(object):
"""
Base class for python tether mapreduce programs.
ToDo: Currently the subclass has to implement both reduce and reduceFlush.
This is not very pythonic. A pythonic way to implement the reducer
would be to pass the reducer a generator (as dumbo does) so that the user
could iterate over the records for the given key.
How would we do this? I think we would need to have two threads: one thread would run
the user's reduce function. This loop would be suspended when no reducer records were available.
The other thread would read in the records for the reducer. This thread should
only buffer so many records at a time (i.e. if the buffer is full, self.input shouldn't return right
away but wait for space to free up)
"""
def __init__(self,inschema=None,midschema=None,outschema=None):
"""
Parameters
---------------------------------------------------------
inschema - The scheme for the input to the mapper
midschema - The scheme for the output of the mapper
outschema - The scheme for the output of the reducer
An example scheme for the prototypical word count example would be
inscheme='{"type":"record", "name":"Pair","namespace":"org.apache.avro.mapred","fields":[
{"name":"key","type":"string"},
{"name":"value","type":"long","order":"ignore"}]
}'
Important: The records are split into (key,value) pairs as required by map reduce
by using all fields with "order"=ignore for the value and the remaining fields for the key.
The subclass provides these schemas in order to tell this class which schemas it expects.
The configure request will also provide the schemas that the parent process is using.
This allows us to check whether the schemas match and if not whether we can resolve
the differences (see http://avro.apache.org/docs/current/spec.html#Schema+Resolution))
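A hypothetical word-count subclass might be sketched like this
(method bodies are illustrative only, not part of this module):
class WordCountTask(TetherTask):
    def map(self, record, collector):
        collector.collect({"key": record["key"], "value": 1})
    def reduce(self, record, collector):
        self._sum = getattr(self, "_sum", 0) + record["value"]
    def reduceFlush(self, record, collector):
        collector.collect({"key": record["key"], "value": self._sum})
        self._sum = 0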
"""
if (inschema is None):
raise ValueError("inschema can't be None")
if (midschema is None):
raise ValueError("midschema can't be None")
if (outschema is None):
raise ValueError("outschema can't be None")
# make sure we can parse the schemas
# Should we call fail if we can't parse the schemas?
self.inschema=schema.parse(inschema)
self.midschema=schema.parse(midschema)
self.outschema=schema.parse(outschema)
# declare various variables
self.clienTransciever=None
# output client is used to communicate with the parent process
# in particular to transmit the outputs of the mapper and reducer
self.outputClient = None
# collectors for the output of the mapper and reducer
self.midCollector=None
self.outCollector=None
self._partitions=None
# cache a list of the fields used by the reducer as the keys
# we need the fields to decide when we have finished processing all values for
# a given key. We cache the fields to be more efficient
self._red_fkeys=None
# We need to keep track of the previous record fed to the reducer
# because we need to be able to determine when we start processing a new group
# in the reducer
self.midRecord=None
# create an event object to signal when
# http server is ready to be shutdown
self.ready_for_shutdown=threading.Event()
self.log=logging.getLogger("TetherTask")
def open(self, inputport,clientPort=None):
"""Open the output client - i.e the connection to the parent process
Parameters
---------------------------------------------------------------
inputport - This is the port that the subprocess is listening on. i.e the
subprocess starts a server listening on this port to accept requests from
the parent process
clientPort - The port on which the server in the parent process is listening
- If this is None we look for the environment variable AVRO_TETHER_OUTPUT_PORT
- This is mainly provided for debugging purposes. In practice
we want to use the environment variable
"""
# Open the connection to the parent process
# The port the parent process is listening on is set in the environment
# variable AVRO_TETHER_OUTPUT_PORT
# open output client, connecting to parent
if (clientPort is None):
clientPortString = os.getenv("AVRO_TETHER_OUTPUT_PORT")
if (clientPortString is None):
raise Exception("AVRO_TETHER_OUTPUT_PORT env var is not set")
clientPort = int(clientPortString)
self.log.info("TetherTask.open: Opening connection to parent server on port={0}".format(clientPort))
# We use the HTTP protocol although we hope to shortly have
# support for SocketServer,
usehttp=True
if(usehttp):
# self.outputClient = ipc.Requestor(outputProtocol, self.clientTransceiver)
# since HTTP is stateless, a new transceiver
# is created and closed for each request. We therefore set clienTransciever to None
# We still declare clienTransciever because for other (stateful) protocols we will need
# it and we want to check when we get the message fail whether the transceiver
# needs to be closed.
# self.clienTransciever=None
self.outputClient = HTTPRequestor("127.0.0.1",clientPort,outputProtocol)
else:
raise NotImplementedError("Only http protocol is currently supported")
try:
self.outputClient.request('configure',{"port":inputport})
except Exception as e:
estr= traceback.format_exc()
self.fail(estr)
def configure(self,taskType, inSchemaText, outSchemaText):
"""
Parameters
-------------------------------------------------------------------
taskType - What type of task (e.g map, reduce)
- This is an enumeration which is specified in the input protocol
inSchemaText - string containing the input schema
- This is the actual schema with which the data was encoded
i.e it is the writer_schema (see http://avro.apache.org/docs/current/spec.html#Schema+Resolution)
This is the schema the parent process is using which might be different
from the one provided by the subclass of tether_task
outSchemaText - string containing the output schema
- This is the schema expected by the parent process for the output
"""
self.taskType = taskType
try:
inSchema = schema.parse(inSchemaText)
outSchema = schema.parse(outSchemaText)
if (taskType==TaskType.MAP):
self.inReader=avio.DatumReader(writers_schema=inSchema,readers_schema=self.inschema)
self.midCollector=Collector(outSchemaText,self.outputClient)
elif(taskType==TaskType.REDUCE):
self.midReader=avio.DatumReader(writers_schema=inSchema,readers_schema=self.midschema)
# this.outCollector = new Collector<OUT>(outSchema);
self.outCollector=Collector(outSchemaText,self.outputClient)
# determine which fields in the input record are they keys for the reducer
self._red_fkeys=[f.name for f in self.midschema.fields if not(f.order=='ignore')]
except Exception as e:
estr= traceback.format_exc()
self.fail(estr)
def set_partitions(self,npartitions):
try:
self._partitions=npartitions
except Exception as e:
estr= traceback.format_exc()
self.fail(estr)
def get_partitions(self):
""" Return the number of map output partitions of this job."""
return self._partitions
def input(self,data,count):
""" Recieve input from the server
Parameters
------------------------------------------------------
data - Should contain the bytes encoding the serialized data
- I think this gets represented as a string
count - how many input records are provided in the binary stream
"""
try:
# to avio.BinaryDecoder
bdata=StringIO(data)
decoder = avio.BinaryDecoder(bdata)
for i in range(count):
if (self.taskType==TaskType.MAP):
inRecord = self.inReader.read(decoder)
# Do we need to pass midCollector if its declared as an instance variable
self.map(inRecord, self.midCollector)
elif (self.taskType==TaskType.REDUCE):
# store the previous record
prev = self.midRecord
# read the new record
self.midRecord = self.midReader.read(decoder);
if (prev != None and not(keys_are_equal(self.midRecord,prev,self._red_fkeys))):
# since the key has changed we need to finalize the processing
# for this group of key,value pairs
self.reduceFlush(prev, self.outCollector)
self.reduce(self.midRecord, self.outCollector)
except Exception as e:
estr= traceback.format_exc()
self.log.warning("failing: "+estr)
self.fail(estr)
def complete(self):
"""
Process the complete request
"""
if ((self.taskType == TaskType.REDUCE ) and not(self.midRecord is None)):
try:
self.reduceFlush(self.midRecord, self.outCollector);
except Exception as e:
estr=traceback.format_exc()
self.log.warning("failing: "+estr);
self.fail(estr)
self.outputClient.request("complete",dict())
def map(self,record,collector):
"""Called with input values to generate intermediat values (i.e mapper output).
Parameters
----------------------------------------------------------------------------
record - The input record
collector - The collector to collect the output
This is an abstract function which should be overloaded by the application specific
subclass.
"""
raise NotImplementedError("This is an abstract method which should be overloaded in the subclass")
def reduce(self,record, collector):
""" Called with input values to generate reducer output. Inputs are sorted by the mapper
key.
The reduce function is invoked once for each value belonging to a given key outputted
by the mapper.
Parameters
----------------------------------------------------------------------------
record - The mapper output
collector - The collector to collect the output
This is an abstract function which should be overloaded by the application specific
subclass.
"""
raise NotImplementedError("This is an abstract method which should be overloaded in the subclass")
def reduceFlush(self,record, collector):
"""
Called with the last intermediate value in each equivalence run.
In other words, reduceFlush is invoked once for each key produced in the reduce
phase. It is called after reduce has been invoked on each value for the given key.
Parameters
------------------------------------------------------------------
record - the last record on which reduce was invoked.
"""
raise NotImplementedError("This is an abstract method which should be overloaded in the subclass")
def status(self,message):
"""
Called to update task status
"""
self.outputClient.request("status",{"message":message})
def count(self,group, name, amount):
"""
Called to increment a counter
"""
self.outputClient.request("count",{"group":group, "name":name, "amount":amount})
def fail(self,message):
"""
Call to fail the task.
"""
self.log.error("TetherTask.fail: failure occured message follows:\n{0}".format(message))
try:
self.outputClient.request("fail",{"message":message})
except Exception as e:
estr=traceback.format_exc()
self.log.error("TetherTask.fail: an exception occured while trying to send the fail message to the output server:\n{0}".format(estr))
self.close()
def close(self):
self.log.info("TetherTask.close: closing")
if not(self.clienTransciever is None):
try:
self.clienTransciever.close()
except Exception as e:
# ignore exceptions
pass
# http server is ready to be shutdown
self.ready_for_shutdown.set()
| apache-2.0 |
civisanalytics/ansible | lib/ansible/modules/network/nxos/nxos_evpn_global.py | 4 | 2899 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'version': '1.0'
}
DOCUMENTATION = '''
---
module: nxos_evpn_global
version_added: "2.2"
short_description: Handles the EVPN control plane for VXLAN.
description:
- Handles the EVPN control plane for VXLAN.
author: Gabriele Gerbino (@GGabriele)
options:
nv_overlay_evpn:
description:
- EVPN control plane.
required: true
choices: ['true', 'false']
'''
EXAMPLES = '''
- nxos_evpn_global:
nv_overlay_evpn: true
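# Disabling the EVPN control plane (illustrative):
- nxos_evpn_global:
    nv_overlay_evpn: false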
'''
RETURN = '''
commands:
description: The set of commands to be sent to the remote device
returned: always
type: list
sample: ['nv overlay evpn']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.nxos import check_args as nxos_check_args
def check_args(module, warnings):
nxos_check_args(module, warnings)
for key in ('include_defaults', 'config', 'save'):
if module.params[key] is not None:
warnings.append('argument %s is no longer supported, ignoring value' % key)
def main():
argument_spec = dict(
nv_overlay_evpn=dict(required=True, type='bool'),
# deprecated in Ansible 2.3
include_defaults=dict(),
config=dict(),
save=dict()
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
if warnings:
result['warnings'] = warnings
config = get_config(module)
commands = list()
if module.params['nv_overlay_evpn'] is True:
if 'nv overlay evpn' not in config:
commands.append('nv overlay evpn')
elif 'nv overlay evpn' in config:
commands.append('no nv overlay evpn')
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mbareta/edx-platform-ft | lms/djangoapps/notification_prefs/tests.py | 26 | 9892 | import json
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import Mock, patch
from notification_prefs import NOTIFICATION_PREF_KEY
from notification_prefs.views import ajax_enable, ajax_disable, ajax_status, set_subscription, UsernameCipher
from student.tests.factories import UserFactory
from openedx.core.djangoapps.user_api.models import UserPreference
from util.testing import UrlResetMixin
@override_settings(SECRET_KEY="test secret key")
class NotificationPrefViewTest(UrlResetMixin, TestCase):
INITIALIZATION_VECTOR = "\x00" * 16
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(NotificationPrefViewTest, self).setUp()
self.user = UserFactory.create(username="testuser")
# Tokens are intentionally hard-coded instead of computed to help us
# avoid breaking existing links.
self.tokens = {
self.user: "AAAAAAAAAAAAAAAAAAAAAA8mMQo96FZfb1YKv1R5X6s=",
# Username with length equal to AES block length to test padding
UserFactory.create(username="sixteencharsuser"):
"AAAAAAAAAAAAAAAAAAAAAPxPWCuI2Ay9TATBVnfw7eIj-hUh6erQ_-VkbDqHqm8D",
# Even longer username
UserFactory.create(username="thisusernameissoveryverylong"):
"AAAAAAAAAAAAAAAAAAAAAPECbYqPI7_W4mRF8LbTaHuHt3tNXPggZ1Bke-zDyEiZ",
# Non-ASCII username
UserFactory.create(username=u"\u4e2d\u56fd"):
"AAAAAAAAAAAAAAAAAAAAAMjfGAhZKIZsI3L-Z7nflTA="
}
self.request_factory = RequestFactory()
def create_prefs(self):
"""Create all test preferences in the database"""
for (user, token) in self.tokens.items():
UserPreference.objects.create(user=user, key=NOTIFICATION_PREF_KEY, value=token)
def assertPrefValid(self, user):
"""Ensure that the correct preference for the user is persisted"""
pref = UserPreference.objects.get(user=user, key=NOTIFICATION_PREF_KEY)
self.assertTrue(pref) # check exists and only 1 (.get)
# now coerce username to utf-8 encoded str, since we test with non-ascii unicode above and
# the unittest framework has a hard time coercing to unicode.
# decrypt also can't take a unicode input, so coerce its input to str
self.assertEqual(str(user.username.encode('utf-8')), UsernameCipher().decrypt(str(pref.value)))
def assertNotPrefExists(self, user):
"""Ensure that the user does not have a persisted preference"""
self.assertFalse(
UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).exists()
)
# AJAX status view
def test_ajax_status_get_0(self):
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"status": 0})
def test_ajax_status_get_1(self):
self.create_prefs()
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"status": 1})
def test_ajax_status_post(self):
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 405)
def test_ajax_status_anon_user(self):
request = self.request_factory.get("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_status, request)
# AJAX enable view
def test_ajax_enable_get(self):
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_enable(request)
self.assertEqual(response.status_code, 405)
self.assertNotPrefExists(self.user)
def test_ajax_enable_anon_user(self):
request = self.request_factory.post("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_enable, request)
self.assertNotPrefExists(self.user)
@patch("Crypto.Random.new")
def test_ajax_enable_success(self, mock_random_new):
mock_stream = Mock()
mock_stream.read.return_value = self.INITIALIZATION_VECTOR
mock_random_new.return_value = mock_stream
def test_user(user):
request = self.request_factory.post("dummy")
request.user = user
response = ajax_enable(request)
self.assertEqual(response.status_code, 204)
self.assertPrefValid(user)
for user in self.tokens.keys():
test_user(user)
def test_ajax_enable_already_enabled(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_enable(request)
self.assertEqual(response.status_code, 204)
self.assertPrefValid(self.user)
def test_ajax_enable_distinct_values(self):
request = self.request_factory.post("dummy")
request.user = self.user
ajax_enable(request)
other_user = UserFactory.create()
request.user = other_user
ajax_enable(request)
self.assertNotEqual(
UserPreference.objects.get(user=self.user, key=NOTIFICATION_PREF_KEY).value,
UserPreference.objects.get(user=other_user, key=NOTIFICATION_PREF_KEY).value
)
# AJAX disable view
def test_ajax_disable_get(self):
self.create_prefs()
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 405)
self.assertPrefValid(self.user)
def test_ajax_disable_anon_user(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_disable, request)
self.assertPrefValid(self.user)
def test_ajax_disable_success(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 204)
self.assertNotPrefExists(self.user)
def test_ajax_disable_already_disabled(self):
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 204)
self.assertNotPrefExists(self.user)
# Unsubscribe view
def test_unsubscribe_post(self):
request = self.request_factory.post("dummy")
response = set_subscription(request, "dummy", subscribe=False)
self.assertEqual(response.status_code, 405)
def test_unsubscribe_invalid_token(self):
def test_invalid_token(token, message):
request = self.request_factory.get("dummy")
self.assertRaisesRegexp(Http404, "^{}$".format(message), set_subscription, request, token, False)
# Invalid base64 encoding
test_invalid_token("ZOMG INVALID BASE64 CHARS!!!", "base64url")
test_invalid_token("Non-ASCII\xff", "base64url")
test_invalid_token(self.tokens[self.user][:-1], "base64url")
# Token not long enough to contain initialization vector
test_invalid_token("AAAAAAAAAAA=", "initialization_vector")
# Token length not a multiple of AES block length
test_invalid_token(self.tokens[self.user][:-4], "aes")
# Invalid padding (ends in 0 byte)
# Encrypted value: "testuser" + "\x00" * 8
test_invalid_token("AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=", "padding")
# Invalid padding (ends in byte > 16)
# Encrypted value: "testusertestuser"
test_invalid_token("AAAAAAAAAAAAAAAAAAAAAC6iLXGhjkFytJoJSBJZzJ4=", "padding")
# Invalid padding (entire string is padding)
# Encrypted value: "\x10" * 16
test_invalid_token("AAAAAAAAAAAAAAAAAAAAANRGw8HDEmlcLVFawgY9wI8=", "padding")
# Nonexistent user
# Encrypted value: "nonexistentuser\x01"
test_invalid_token("AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=", "username")
def test_unsubscribe_success(self):
self.create_prefs()
def test_user(user):
url = reverse('unsubscribe_forum_update', args=[self.tokens[user]])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotPrefExists(user)
for user in self.tokens.keys():
test_user(user)
def test_unsubscribe_twice(self):
self.create_prefs()
url = reverse('unsubscribe_forum_update', args=[self.tokens[self.user]])
self.client.get(url)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotPrefExists(self.user)
def test_resubscribe_success(self):
def test_user(user):
# start without a pref key
self.assertFalse(UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY))
url = reverse('resubscribe_forum_update', args=[self.tokens[user]])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertPrefValid(user)
for user in self.tokens.keys():
test_user(user)
| agpl-3.0 |
HaroldMills/Vesper | scripts/create_clip_zip_files.py | 1 | 1964 | """
Script that creates zip files from Vesper archive clip directories.
The script puts the zip files in a directory called "Clips (zipped)"
that is a sibling of the archive's "Clips" directory. The "Clips (zipped)"
directory has the same structure as the "Clips" directory, except that
each grandchild directory of the "Clips" directory is replaced by a zip
file containing the contents of the grandchild directory.
The script should be run from the archive directory, e.g.:
cd /Users/harold/archive
python create_clip_zip_files.py
On macOS, you can unzip the resulting zip files with "unzip \*.zip".
"""
from pathlib import Path
from zipfile import ZipFile
import re
DIR_NAME_RE = re.compile(r'\d\d\d')
def main():
dir_path = Path.cwd()
clips_dir_path = dir_path / 'Clips'
zips_dir_path = dir_path / 'Clips (zipped)'
for clip_dir_path in clips_dir_path.glob('*/*'):
parent_dir_name = clip_dir_path.parent.name
clip_dir_name = clip_dir_path.name
if DIR_NAME_RE.match(parent_dir_name) and \
DIR_NAME_RE.match(clip_dir_name):
parent_dir_path = zips_dir_path / parent_dir_name
parent_dir_path.mkdir(parents=True, exist_ok=True)
zip_file_name = f'{parent_dir_name}.{clip_dir_name}.zip'
zip_file_path = parent_dir_path / zip_file_name
print(f'Compressing "{parent_dir_name}/{clip_dir_name}"...')
with ZipFile(zip_file_path, 'w') as zip_file:
for clip_file_path in clip_dir_path.glob('*.wav'):
# print(f' Compressing file "{clip_file_path.name}"...')
rel_path = clip_file_path.relative_to(clip_dir_path.parent)
zip_file.write(str(clip_file_path), str(rel_path))
if __name__ == '__main__':
main()
| mit |
ttsubo/ryu | ryu/app/cbench.py | 51 | 1787 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A dumb OpenFlow 1.0 responder for benchmarking the controller framework.
Intended to be used with oflops cbench.
"""
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
class Cbench(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(Cbench, self).__init__(*args, **kwargs)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
match = datapath.ofproto_parser.OFPMatch(
ofproto_v1_0.OFPFW_ALL, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0)
mod = datapath.ofproto_parser.OFPFlowMod(
datapath, match=match, cookie=0, command=ofproto.OFPFC_ADD,
idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
flags=0, actions=None)
datapath.send_msg(mod)
| apache-2.0 |
KlinkOnE/kyleopen-kernel | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
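#
# A typical invocation might look like this (options are illustrative,
# and the exact perf-script command form is an assumption):
#   perf script record netdev-times
#   perf script report netdev-times tx dev=eth0 debug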
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/topi/tests/python/test_topi_reduce.py | 2 | 6995 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for reduce."""
import os
import numpy as np
import tvm
import topi
from common import get_all_backend
def _my_npy_argmax(arr, axis, keepdims):
if not keepdims:
return arr.argmax(axis=axis)
else:
if axis is not None:
out_shape = list(arr.shape)
out_shape[axis] = 1
else:
out_shape = [1 for _ in range(len(arr.shape))]
return arr.argmax(axis=axis).reshape(out_shape)
def _my_npy_argmin(arr, axis, keepdims):
if not keepdims:
return arr.argmin(axis=axis)
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmin(axis=axis).reshape(out_shape)
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum", dtype="float32"):
# Build the logic and compile the function
A = tvm.placeholder(shape=in_shape, name="A", dtype=dtype)
A1 = topi.sqrt(topi.exp(A))
out_dtype = dtype
if type == "sum":
B = topi.sum(A1, axis=axis, keepdims=keepdims)
elif type == "all":
B = topi.all(A, axis=axis, keepdims=keepdims)
elif type == "max":
B = topi.max(A1, axis=axis, keepdims=keepdims)
elif type == "min":
B = topi.min(A1, axis=axis, keepdims=keepdims)
elif type == "argmax":
B = topi.argmax(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
elif type == "argmin":
B = topi.argmin(A1, axis=axis, keepdims=keepdims)
out_dtype = "int32"
else:
raise NotImplementedError
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_reduce(B)
foo = tvm.build(s, [A, B], device, name=type)
# Test
if dtype == 'bool':
in_npy_map = in_npy = np.random.choice([True, False], size=in_shape)
else:
in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)
if type == "sum":
out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
elif type == "all" and dtype == 'bool':
out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
elif type == "max":
out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
elif type == "min":
out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
elif type == "argmax":
out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
elif type == "argmin":
out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
else:
raise NotImplementedError
data_tvm = tvm.nd.array(in_npy, ctx=ctx)
out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype)
for _ in range(1):
foo(data_tvm, out_tvm)
if type == "argmax" or type == "argmin":
out_tvm_indices = out_tvm.asnumpy()
if keepdims:
out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
if axis is None:
out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
else:
other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis+1):]))
sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
out_tvm_val = in_npy_map[sel_indices]
if type == "argmax":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1E-3, 1E-3)
elif type == "argmin":
tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1E-3, 1E-3)
else:
tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3)
for device in get_all_backend():
check_device(device)
def test_reduce_map():
verify_reduce_map_ele(in_shape=(32,),
axis=0,
keepdims=False,
type="argmax")
verify_reduce_map_ele(in_shape=(128, 24, 128, 24),
axis=(1, 2, 3),
keepdims=True,
type="sum")
verify_reduce_map_ele(in_shape=(2, 3),
axis=None,
keepdims=True,
type="all",
dtype='bool')
verify_reduce_map_ele(in_shape=(128, 24 * 128 * 24),
axis=(1,),
keepdims=False,
type="max")
verify_reduce_map_ele(in_shape=(32, 128, 24),
axis=None,
keepdims=True,
type="sum")
verify_reduce_map_ele(in_shape=(32, 128, 24),
axis=None,
keepdims=True,
dtype='bool',
type="all")
verify_reduce_map_ele(in_shape=(128, 24, 128, 24),
axis=(0, 2),
keepdims=False,
type="min")
verify_reduce_map_ele(in_shape=(32, 128),
axis=1,
keepdims=True,
type="argmax")
verify_reduce_map_ele(in_shape=(32, 24, 32, 24),
axis=2,
keepdims=False,
type="argmin")
verify_reduce_map_ele(in_shape=(31, 21, 15),
axis=None,
keepdims=True,
type="argmax")
verify_reduce_map_ele(in_shape=(31, 21, 15),
axis=None,
keepdims=False,
type="sum")
verify_reduce_map_ele(in_shape=(128, 24, 128, 24),
axis=(1, 2, 3),
keepdims=True,
type="sum",
dtype="float64")
if __name__ == "__main__":
test_reduce_map()
| apache-2.0 |
joonro/PyTables | tables/filters.py | 6 | 14264 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: 2007-02-23
# Author: Ivan Vilata i Balaguer - ivan at selidor dot net
#
# $Id$
#
########################################################################
"""Functionality related with filters in a PyTables file."""
# Imports
# =======
import warnings
import numpy
from tables import (
utilsextension, blosc_compressor_list, blosc_compcode_to_compname)
from tables.exceptions import FiltersWarning
# Public variables
# ================
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
all_complibs = ['zlib', 'lzo', 'bzip2', 'blosc']
all_complibs += ['blosc:%s' % cname for cname in blosc_compressor_list()]
"""List of all compression libraries."""
foreign_complibs = ['szip']
"""List of known but unsupported compression libraries."""
default_complib = 'zlib'
"""The default compression library."""
# Private variables
# =================
_shuffle_flag = 0x1
_fletcher32_flag = 0x2
_rounding_flag = 0x4
# Classes
# =======
class Filters(object):
"""Container for filter properties.
This class is meant to serve as a container that keeps information about
the filter properties associated with the chunked leaves, that is Table,
CArray, EArray and VLArray.
Instances of this class can be directly compared for equality.
Parameters
----------
complevel : int
Specifies a compression level for data. The allowed
range is 0-9. A value of 0 (the default) disables
compression.
complib : str
Specifies the compression library to be used. Right now, 'zlib' (the
default), 'lzo', 'bzip2' and 'blosc' are supported. Additional
compressors for Blosc like 'blosc:blosclz' ('blosclz' is the default
in case the additional compressor is not specified), 'blosc:lz4',
'blosc:lz4hc', 'blosc:snappy' and 'blosc:zlib' are supported too.
Specifying a compression library which is not available in the
system issues a FiltersWarning and sets the library to the default
one.
shuffle : bool
Whether or not to use the *Shuffle*
filter in the HDF5 library. This is normally used to improve
the compression ratio. A false value disables shuffling and
a true one enables it. The default value depends on whether
compression is enabled or not; if compression is enabled,
shuffling defaults to be enabled, else shuffling is
disabled. Shuffling can only be used when compression is enabled.
fletcher32 : bool
Whether or not to use the
*Fletcher32* filter in the HDF5 library.
This is used to add a checksum on each data chunk. A false
value (the default) disables the checksum.
least_significant_digit : int
If specified, data will be truncated (quantized). In conjunction
with enabling compression, this produces 'lossy', but
significantly more efficient compression. For example, if
*least_significant_digit=1*, data will be quantized using
``around(scale*data)/scale``, where ``scale = 2**bits``, and
bits is determined so that a precision of 0.1 is retained (in
this case bits=4). Default is *None*, or no quantization.
.. note::
quantization is only applied if some form of compression is
enabled
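For instance (illustrative arithmetic): with
*least_significant_digit=1*, bits=4 and ``scale = 2**4 = 16``, so a
value of 3.14159 is stored as
``around(16 * 3.14159) / 16 = 50 / 16 = 3.125``.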
Examples
--------
This is a small example on using the Filters class::
import numpy
from tables import *
fileh = open_file('test5.h5', mode='w')
atom = Float32Atom()
filters = Filters(complevel=1, complib='blosc', fletcher32=True)
arr = fileh.create_earray(fileh.root, 'earray', atom, (0,2),
"A growable array", filters=filters)
# Append several rows in only one call
arr.append(numpy.array([[1., 2.],
[2., 3.],
[3., 4.]], dtype=numpy.float32))
# Print information on that enlargeable array
print("Result Array:")
print(repr(arr))
fileh.close()
This enforces the use of the Blosc library, a compression level of 1 and a
Fletcher32 checksum filter as well. See the output of this example::
Result Array:
/earray (EArray(3, 2), fletcher32, shuffle, blosc(1)) 'A growable array'
type = float32
shape = (3, 2)
itemsize = 4
nrows = 3
extdim = 0
flavor = 'numpy'
byteorder = 'little'
.. rubric:: Filters attributes
.. attribute:: fletcher32
Whether the *Fletcher32* filter is active or not.
.. attribute:: complevel
The compression level (0 disables compression).
.. attribute:: complib
The compression filter used (irrelevant when compression is not
enabled).
.. attribute:: shuffle
Whether the *Shuffle* filter is active or not.
"""
@classmethod
def _from_leaf(class_, leaf):
# Get a dictionary with all the filters
parent = leaf._v_parent
filters_dict = utilsextension.get_filters(parent._v_objectid,
leaf._v_name)
if filters_dict is None:
filters_dict = {} # not chunked
kwargs = dict(complevel=0, shuffle=False, fletcher32=False, # all off
least_significant_digit=None, _new=False)
for (name, values) in filters_dict.iteritems():
if name == 'deflate':
name = 'zlib'
if name in all_complibs:
kwargs['complib'] = name
if name == "blosc":
kwargs['complevel'] = values[4]
# Shuffle filter is internal to blosc
if values[5]:
kwargs['shuffle'] = True
# In Blosc 1.3 another parameter is used for the compressor
if len(values) > 6:
cname = blosc_compcode_to_compname(values[6])
kwargs['complib'] = "blosc:%s" % cname
else:
kwargs['complevel'] = values[0]
elif name in foreign_complibs:
kwargs['complib'] = name
kwargs['complevel'] = 1 # any nonzero value will do
elif name in ['shuffle', 'fletcher32']:
kwargs[name] = True
return class_(**kwargs)
@classmethod
def _unpack(class_, packed):
"""Create a new `Filters` object from a packed version.
>>> Filters._unpack(0)
Filters(complevel=0, shuffle=False, fletcher32=False, least_significant_digit=None)
>>> Filters._unpack(0x101)
Filters(complevel=1, complib='zlib', shuffle=False, fletcher32=False, least_significant_digit=None)
>>> Filters._unpack(0x30109)
Filters(complevel=9, complib='zlib', shuffle=True, fletcher32=True, least_significant_digit=None)
>>> Filters._unpack(0x3010A)
Traceback (most recent call last):
...
ValueError: compression level must be between 0 and 9
>>> Filters._unpack(0x1)
Traceback (most recent call last):
...
ValueError: invalid compression library id: 0
"""
kwargs = {'_new': False}
# Byte 0: compression level.
kwargs['complevel'] = complevel = packed & 0xff
packed >>= 8
# Byte 1: compression library id (0 for none).
if complevel > 0:
complib_id = int(packed & 0xff)
if not (0 < complib_id <= len(all_complibs)):
raise ValueError("invalid compression library id: %d"
% complib_id)
kwargs['complib'] = all_complibs[complib_id - 1]
packed >>= 8
# Byte 2: parameterless filters.
kwargs['shuffle'] = packed & _shuffle_flag
kwargs['fletcher32'] = packed & _fletcher32_flag
has_rounding = packed & _rounding_flag
packed >>= 8
# Byte 3: least significant digit.
if has_rounding:
kwargs['least_significant_digit'] = numpy.int8(packed & 0xff)
else:
kwargs['least_significant_digit'] = None
return class_(**kwargs)
def _pack(self):
"""Pack the `Filters` object into a 64-bit NumPy integer."""
packed = numpy.int64(0)
# Byte 3: least significant digit.
if self.least_significant_digit is not None:
#assert isinstance(self.least_significant_digit, numpy.int8)
packed |= self.least_significant_digit
packed <<= 8
# Byte 2: parameterless filters.
if self.shuffle:
packed |= _shuffle_flag
if self.fletcher32:
packed |= _fletcher32_flag
if self.least_significant_digit:
packed |= _rounding_flag
packed <<= 8
# Byte 1: compression library id (0 for none).
if self.complevel > 0:
packed |= all_complibs.index(self.complib) + 1
packed <<= 8
# Byte 0: compression level.
packed |= self.complevel
return packed
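# Illustrative cross-check (comment only, not part of the class): packing
# Filters(complevel=1, complib='zlib') this way yields 0x101, where byte 0
# carries the level (0x01) and byte 1 the 1-based library id (0x01); this is
# the same value the Filters._unpack(0x101) doctest above round-trips.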
def __init__(self, complevel=0, complib=default_complib,
shuffle=True, fletcher32=False,
least_significant_digit=None, _new=True):
if not (0 <= complevel <= 9):
raise ValueError("compression level must be between 0 and 9")
if _new and complevel > 0:
# These checks are not performed when loading filters from disk.
if complib not in all_complibs:
raise ValueError(
"compression library ``%s`` is not supported; "
"it must be one of: %s"
% (complib, ", ".join(all_complibs)))
if utilsextension.which_lib_version(complib) is None:
warnings.warn("compression library ``%s`` is not available; "
"using ``%s`` instead"
% (complib, default_complib), FiltersWarning)
complib = default_complib # always available
complevel = int(complevel)
complib = str(complib)
shuffle = bool(shuffle)
fletcher32 = bool(fletcher32)
if least_significant_digit is not None:
least_significant_digit = numpy.int8(least_significant_digit)
if complevel == 0:
# Override some inputs when compression is not enabled.
complib = None # make it clear there is no compression
shuffle = False # shuffling and not compressing makes no sense
least_significant_digit = None
elif complib not in all_complibs:
# Do not try to use a meaningful level for unsupported libs.
complevel = -1
self.complevel = complevel
"""The compression level (0 disables compression)."""
self.complib = complib
"""The compression filter used (irrelevant when compression is
not enabled).
"""
self.shuffle = shuffle
"""Whether the *Shuffle* filter is active or not."""
self.fletcher32 = fletcher32
"""Whether the *Fletcher32* filter is active or not."""
self.least_significant_digit = least_significant_digit
"""The least significant digit to which data shall be truncated."""
def __repr__(self):
args, complevel = [], self.complevel
if complevel >= 0: # meaningful compression level
args.append('complevel=%d' % complevel)
if complevel != 0: # compression enabled (-1 or > 0)
args.append('complib=%r' % self.complib)
args.append('shuffle=%s' % self.shuffle)
args.append('fletcher32=%s' % self.fletcher32)
args.append(
'least_significant_digit=%s' % self.least_significant_digit)
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def __str__(self):
return repr(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__dict__:
if getattr(self, attr) != getattr(other, attr):
return False
return True
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self.__class__, self.complevel, self.complib,
# self.shuffle, self.fletcher32))
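# Illustrative note (not executed here): in Python 3 a class that defines
# __eq__ without __hash__ gets __hash__ set to None, so hash(Filters())
# would raise TypeError and instances could not be used as dict keys or
# set members until a __hash__ like the one sketched above is enabled.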
def copy(self, **override):
"""Get a copy of the filters, possibly overriding some arguments.
Constructor arguments to be overridden must be passed as keyword
arguments.
Using this method is recommended over replacing the attributes of an
instance, since instances of this class may become immutable in the
future::
>>> filters1 = Filters()
>>> filters2 = filters1.copy()
>>> filters1 == filters2
True
>>> filters1 is filters2
False
>>> filters3 = filters1.copy(complevel=1) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: compression library ``None`` is not supported...
>>> filters3 = filters1.copy(complevel=1, complib='zlib')
>>> print(filters1)
Filters(complevel=0, shuffle=False, fletcher32=False, least_significant_digit=None)
>>> print(filters3)
Filters(complevel=1, complib='zlib', shuffle=False, fletcher32=False, least_significant_digit=None)
>>> filters1.copy(foobar=42)
Traceback (most recent call last):
...
TypeError: __init__() got an unexpected keyword argument 'foobar'
"""
newargs = self.__dict__.copy()
newargs.update(override)
return self.__class__(**newargs)
# Main part
# =========
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| bsd-3-clause |
mizdebsk/javapackages | test/osgi_req_test.py | 1 | 2463 | import unittest
from test_common import *
class TestOSGiReq(unittest.TestCase):
@osgireq(["basic/buildroot/usr/share/META-INF/MANIFEST.MF"])
def test_basic(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
assertIn(self, "osgi(org.hamcrest.core)", sout)
self.assertEqual(len(sout), 1)
@osgireq(["basic_jar/buildroot/usr/lib/basic.jar"])
def test_basic_jar(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
assertIn(self, "osgi(org.hamcrest.core)", sout)
self.assertEqual(len(sout), 1)
@osgireq(["symlink/buildroot/usr/share/java/foo/META-INF/MANIFEST.MF"])
def test_symlink(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
self.assertEqual(len(sout), 0)
@osgireq(["symlink_jar/buildroot/usr/share/java/foo/basic.jar"])
def test_symlink_jar(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
self.assertEqual(len(sout), 0)
@osgireq(["symlink_dir/buildroot/usr/share/java/foo/META-INF/MANIFEST.MF"])
def test_symlink_dir(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
self.assertEqual(len(sout), 0)
@osgireq(["symlink_dir_jar/buildroot/usr/share/java/foo/basic.jar"])
def test_symlink_dir_jar(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
self.assertEqual(len(sout), 0)
@osgireq(["empty/META-INF/MANIFEST.MF"])
def test_empty(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
self.assertEqual(len(sout), 0)
@osgireq(["self_require/buildroot/usr/share/java/foo/META-INF/MANIFEST.MF"])
def test_self_require(self, stdout, stderr, return_value):
self.assertEqual(return_value, 0, stderr)
sout = [x for x in stdout.split('\n') if x]
assertIn(self, "osgi(bar) = 4.5.6", sout)
self.assertEqual(len(sout), 1, sout)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
olasitarska/django | django/conf/locale/zh_Hans/formats.py | 634 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
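# For instance (illustrative only, not part of this module), the first entry
# below parses with the standard library as
# datetime.datetime.strptime('2016/09/05', '%Y/%m/%d'). Note that %n is not
# a directive Python's strptime accepts and %j means day-of-year, so the
# localized '%Y年%n月%j日' entries may not parse as intended.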
DATE_INPUT_FORMATS = (
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
)
TIME_INPUT_FORMATS = (
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
)
DATETIME_INPUT_FORMATS = (
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 20:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
'%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
| bsd-3-clause |
debugger06/MiroX | windows/plat/config.py | 2 | 8122 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import os
import _winreg
import cPickle
import logging
import string
import tempfile
import traceback
import shutil
from miro import app
from miro import prefs
from miro import util
from miro import u3info
from miro import fileutil
from miro.plat import proxyfind
from miro.plat import resources
from miro.plat import specialfolders
def _get_support_directory():
if u3info.u3_active:
path = u3info.APP_DATA_PREFIX
else:
# We don't get the publisher and long app name from the config so
# changing the app name doesn't change the support directory
path = os.path.join(specialfolders.app_data_directory,
u'Participatory Culture Foundation',
u'Miro',
u'Support')
try:
fileutil.makedirs(path)
except:
pass
return path
def _get_config_file():
return fileutil.expand_filename(
os.path.join(_get_support_directory(), "preferences.bin"))
def load():
save_file = _get_config_file()
# if Miro died while saving the config file, then it's likely there's
# a save_file.new floating around and that's the one we want to use.
new_save_file = save_file + ".new"
if os.path.exists(new_save_file):
save_file = new_save_file
if os.path.exists(save_file):
try:
return cPickle.load(open(save_file))
except cPickle.UnpicklingError:
logging.exception("error loading config")
return {}
def save(data):
# save to a new file and if that's successful then rename it.
# this reduces the chance that the user ends up with a hosed
# preferences file.
save_file = _get_config_file()
new_file = save_file + ".new"
try:
f = open(new_file, 'w')
cPickle.dump(data, f)
f.close()
if not os.path.exists(save_file):
shutil.move(new_file, save_file)
return
os.remove(save_file)
shutil.move(new_file, save_file)
except:
raise
def get(descriptor):
if descriptor == prefs.MOVIES_DIRECTORY:
return os.path.join(
specialfolders.base_movies_directory,
app.configfile['shortAppName'])
elif descriptor == prefs.NON_VIDEO_DIRECTORY:
return specialfolders.non_video_directory
elif descriptor == prefs.GETTEXT_PATHNAME:
return resources.path("locale")
elif descriptor == prefs.SUPPORT_DIRECTORY:
return fileutil.expand_filename(_get_support_directory())
elif descriptor == prefs.ICON_CACHE_DIRECTORY:
return os.path.join(_get_support_directory(), 'icon-cache')
elif descriptor == prefs.COVER_ART_DIRECTORY:
return os.path.join(_get_support_directory(), 'cover-art')
elif descriptor == prefs.SQLITE_PATHNAME:
path = get(prefs.SUPPORT_DIRECTORY)
return os.path.join(path, 'sqlitedb')
elif descriptor == prefs.CRASH_PATHNAME:
directory = tempfile.gettempdir()
return os.path.join(directory, "crashes")
elif descriptor == prefs.LOG_PATHNAME:
if u3info.u3_active:
directory = u3info.app_data_path
else:
directory = tempfile.gettempdir()
return os.path.join(directory,
('%s.log' % app.configfile['shortAppName']))
elif descriptor == prefs.DOWNLOADER_LOG_PATHNAME:
if u3info.u3_active:
directory = u3info.app_data_path
else:
directory = tempfile.gettempdir()
return os.path.join(directory,
('%s-downloader.log' % app.configfile['shortAppName']))
elif descriptor == prefs.HELPER_LOG_PATHNAME:
if u3info.u3_active:
directory = u3info.app_data_path
else:
directory = tempfile.gettempdir()
return os.path.join(directory,
('%s-helper.log' % app.configfile['shortAppName']))
elif descriptor == prefs.RUN_AT_STARTUP:
import logging
# we use the legacy startup registry key, so legacy versions
# of Windows have a chance
# http://support.microsoft.com/?kbid=270035
try:
folder = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
"Software\Microsoft\Windows\CurrentVersion\Run")
except WindowsError, e:
# 2 indicates that the key doesn't exist yet, so
# RUN_AT_STARTUP is clearly False
if e.errno == 2:
logging.exception("windowserror kicked up at open key")
return False
raise
long_app_name = app.configfile['longAppName']
count = 0
while True:
try:
name, val, type_ = _winreg.EnumValue(folder, count)
count += 1
if name == long_app_name:
return True
except WindowsError, e:
# 22 indicates there are no more items in this folder
# to iterate through.
if e.errno == 22:
return False
else:
raise
return False
elif descriptor == prefs.HTTP_PROXY_ACTIVE:
proxy_info = proxyfind.get_proxy_info()
return proxy_info.host is not None
elif descriptor == prefs.HTTP_PROXY_HOST:
proxy_info = proxyfind.get_proxy_info()
return proxy_info.host
elif descriptor == prefs.HTTP_PROXY_PORT:
proxy_info = proxyfind.get_proxy_info()
return proxy_info.port
elif descriptor == prefs.HTTP_PROXY_IGNORE_HOSTS:
proxy_info = proxyfind.get_proxy_info()
return proxy_info.ignore_hosts
elif descriptor == prefs.AUTOUPDATE_URL:
if app.configfile.contains(descriptor.key):
default = app.configfile.get(descriptor.key)
else:
default = u'http://miro-updates.participatoryculture.org' \
'/democracy-appcast-windows.xml'
return prefs.get_from_environ('DTV_AUTOUPDATE_URL', default)
elif descriptor == prefs.AUTOUPDATE_BETA_URL:
if app.configfile.contains(descriptor.key):
default = app.configfile.get(descriptor.key)
else:
default = u'http://miro-updates.participatoryculture.org' \
'/democracy-appcast-windows-beta.xml'
return prefs.get_from_environ('DTV_AUTOUPDATE_BETA_URL', default)
# Proxy authorization isn't supported on Windows, so the following
# keys are ignored:
#
# HTTP_PROXY_AUTHORIZATION_ACTIVE
# HTTP_PROXY_AUTHORIZATION_USERNAME
# HTTP_PROXY_AUTHORIZATION_PASSWORD
else:
return descriptor.default
| gpl-2.0 |
upndwn4par/android_external_skia | tools/generate_fir_coeff.py | 198 | 4546 | #!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import math
import pprint
def withinStdDev(n):
"""Returns the percent of samples within n std deviations of the normal."""
return math.erf(n / math.sqrt(2))
def withinStdDevRange(a, b):
"""Returns the percent of samples within the std deviation range a, b"""
if b < a:
return 0;
if a < 0:
if b < 0:
return (withinStdDev(-a) - withinStdDev(-b)) / 2;
else:
return (withinStdDev(-a) + withinStdDev(b)) / 2;
else:
return (withinStdDev(b) - withinStdDev(a)) / 2;
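# Illustrative sanity check (not used by the generator below): withinStdDev(1)
# is about 0.6827, the familiar 68% of the 68-95-99.7 rule, and
# withinStdDevRange(-1, 1) returns the same value because the symmetric range
# sums both halves around the mean.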
#We have a bunch of smudged samples which represent the average coverage of a range.
#We have a 'center' which may not line up with those samples.
#From the 'center' we want to make a normal approximation where '5' sample width out we're at '3' std deviations.
#The first and last samples may not be fully covered.
#This is the sub-sample shift for each set of FIR coefficients (the centers of the lcds in the samples)
#Each subpxl takes up 1/3 of a pixel, so they are centered at x=(i/n+1/2n), or 1/6, 3/6, 5/6 of a pixel.
#Each sample takes up 1/4 of a pixel, so the results fall at (x*4)%1, or 2/3, 0, 1/3 of a sample.
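#A quick check of that arithmetic (illustrative only):
#  subpxl centers in pixels: [i / 3. + 1 / 6. for i in range(3)] gives [1/6, 3/6, 5/6]
#  fractional sample offsets: [(c * 4) % 1 for c in centers] gives [2/3, 0.0, 1/3]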
samples_per_pixel = 4
subpxls_per_pixel = 3
#sample_offsets is (frac, int) in sample units.
sample_offsets = [math.modf((float(subpxl_index)/subpxls_per_pixel + 1.0/(2.0*subpxls_per_pixel))*samples_per_pixel) for subpxl_index in range(subpxls_per_pixel)]
#How many samples to consider to the left and right of the subpxl center.
sample_units_width = 5
#The std deviation at sample_units_width.
std_dev_max = 3
#The target sum is in some fixed point representation.
#Values larger than 1 in fixed point simulate ink spread.
target_sum = 0x110
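#Illustrative reading (an assumption, not stated in this file): if 0x100
#represents 1.0 in the fixed point, then target_sum = 0x110 == 272 is about
#1.0625, i.e. roughly 6% of simulated ink spread.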
for sample_offset, sample_align in sample_offsets:
coeffs = []
coeffs_rounded = []
#We start at sample_offset - sample_units_width
current_sample_left = sample_offset - sample_units_width
current_std_dev_left = -std_dev_max
done = False
while not done:
current_sample_right = math.floor(current_sample_left + 1)
if current_sample_right > sample_offset + sample_units_width:
done = True
current_sample_right = sample_offset + sample_units_width
current_std_dev_right = current_std_dev_left + ((current_sample_right - current_sample_left) / sample_units_width) * std_dev_max
coverage = withinStdDevRange(current_std_dev_left, current_std_dev_right)
coeffs.append(coverage * target_sum)
coeffs_rounded.append(int(round(coverage * target_sum)))
current_sample_left = current_sample_right
current_std_dev_left = current_std_dev_right
# Now we have the numbers we want, but our rounding needs to add up to target_sum.
delta = 0
coeffs_rounded_sum = sum(coeffs_rounded)
if coeffs_rounded_sum > target_sum:
# The coeffs add up to too much. Subtract 1 from the ones which were rounded up the most.
delta = -1
if coeffs_rounded_sum < target_sum:
# The coeffs add up to too little. Add 1 to the ones which were rounded down the most.
delta = 1
if delta:
print "Initial sum is 0x%0.2X, adjusting." % (coeffs_rounded_sum,)
coeff_diff = [(coeff_rounded - coeff) * delta
for coeff, coeff_rounded in zip(coeffs, coeffs_rounded)]
class IndexTracker:
def __init__(self, index, item):
self.index = index
self.item = item
def __lt__(self, other):
return self.item < other.item
def __repr__(self):
return "arr[%d] == %s" % (self.index, repr(self.item))
coeff_pkg = [IndexTracker(i, diff) for i, diff in enumerate(coeff_diff)]
coeff_pkg.sort()
# num_elements_to_force_round had better be < (2 * sample_units_width + 1) or
# * our math was wildly wrong
# * an awful lot of the curve is outside our sample
# either is pretty bad, and probably means the results will not be useful.
num_elements_to_force_round = abs(coeffs_rounded_sum - target_sum)
for i in xrange(num_elements_to_force_round):
print "Adding %d to index %d to force round %f." % (delta, coeff_pkg[i].index, coeffs[coeff_pkg[i].index])
coeffs_rounded[coeff_pkg[i].index] += delta
print "Prepending %d 0x00 for allignment." % (sample_align,)
coeffs_rounded_aligned = ([0] * int(sample_align)) + coeffs_rounded
print ', '.join(["0x%0.2X" % coeff_rounded for coeff_rounded in coeffs_rounded_aligned])
print sum(coeffs), hex(sum(coeffs_rounded))
print
| bsd-3-clause |
ibmsoe/tensorflow | tensorflow/python/client/session_test.py | 4 | 67123 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
self.assertAllEqual(y, 2 * np.ones(2))
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def runTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def runTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def runTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def runTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def runTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def runTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def testPartialRunDirect(self):
self.runTestPartialRun(session.Session())
def testPartialRunIncompleteDirect(self):
self.runTestPartialRunIncomplete(session.Session())
def testConcurrentPartialRunDirect(self):
self.runTestConcurrentPartialRun(session.Session())
def testManyPartialRunDirect(self):
self.runTestManyPartialRun(session.Session())
def testRunAndPartialRunDirect(self):
self.runTestRunAndPartialRun(session.Session())
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.runTestPartialRunMissingPlaceholderFeedException(session.Session())
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRun(session.Session(server.target))
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunIncomplete(session.Session(server.target))
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestConcurrentPartialRun(session.Session(server.target))
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestManyPartialRun(session.Session(server.target))
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestRunAndPartialRun(session.Session(server.target))
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(squared_tensor, feed_dict={
squared_tensor : np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
mdaniel/intellij-community | python/helpers/py2only/docutils/languages/ru.py | 128 | 2155 | # -*- coding: utf-8 -*-
# $Id: ru.py 7125 2011-09-16 18:36:18Z milde $
# Author: Roman Suzi <rnd@onego.ru>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
u'abstract': u'Аннотация',
u'address': u'Адрес',
u'attention': u'Внимание!',
u'author': u'Автор',
u'authors': u'Авторы',
u'caution': u'Осторожно!',
u'contact': u'Контакт',
u'contents': u'Содержание',
u'copyright': u'Права копирования',
u'danger': u'ОПАСНО!',
u'date': u'Дата',
u'dedication': u'Посвящение',
u'error': u'Ошибка',
u'hint': u'Совет',
u'important': u'Важно',
u'note': u'Примечание',
u'organization': u'Организация',
u'revision': u'Редакция',
u'status': u'Статус',
u'tip': u'Подсказка',
u'version': u'Версия',
u'warning': u'Предупреждение'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'аннотация': u'abstract',
u'адрес': u'address',
u'автор': u'author',
u'авторы': u'authors',
u'контакт': u'contact',
u'права копирования': u'copyright',
u'дата': u'date',
u'посвящение': u'dedication',
u'организация': u'organization',
u'редакция': u'revision',
u'статус': u'status',
u'версия': u'version'}
"""Russian (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
Garrett-R/scikit-learn | sklearn/cross_decomposition/pls_.py | 5 | 28683 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the power method for determining the dominant eigenvector and
eigenvalue of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
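# Illustrative sketch (not part of the original module; demonstration names
# only): the inner loop above acts like a power iteration on X'Y, so with
# centered synthetic data its x_weights should align, up to sign, with the
# first left singular vector returned by _svd_cross_product below.
#
# rng = np.random.RandomState(0)
# Xd = rng.randn(50, 4); Xd -= Xd.mean(axis=0)
# Yd = rng.randn(50, 3); Yd -= Yd.mean(axis=0)
# u_nipals, v_nipals, _ = _nipals_twoblocks_inner_loop(Xd, Yd)
# u_svd, v_svd = _svd_cross_product(Xd, Yd)
# assert np.allclose(np.abs(u_nipals.ravel()), np.abs(u_svd.ravel()), atol=1e-4)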
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
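# Minimal usage sketch (illustrative only, demonstration names assumed):
# after _center_scale_xy with scale=True, every column has zero mean and
# unit ddof=1 standard deviation.
#
# Xd = np.array([[1., 2.], [3., 4.], [5., 9.]])
# Yd = np.array([[1.], [0.], [1.]])
# Xs, Ys, xm, ym, xsd, ysd = _center_scale_xy(Xd.copy(), Yd.copy(), scale=True)
# assert np.allclose(Xs.mean(axis=0), 0.)
# assert np.allclose(Xs.std(axis=0, ddof=1), 1.)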
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on residuals cross-covariance matrices.
Parameters
----------
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if not self.deflation_mode in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
#2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.inv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.inv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
# NB: the always-true condition is kept deliberately; the regression
# coefficients are estimated for every deflation mode.
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coefs = (1. / self.x_std_.reshape((p, 1)) * self.coefs *
self.y_std_)
return self
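# Illustrative sketch (demonstration names only, reusing Xd and Yd from the
# sketch above; not part of the class): the "X = TP' + Err" identity noted
# in the loop comments can be checked on fitted attributes; the deflated
# remainder is exactly the centered/scaled X minus the rank-k
# reconstruction from scores and loadings.
#
# pls = PLSCanonical(n_components=2).fit(Xd, Yd)
# X_cs = (Xd - pls.x_mean_) / pls.x_std_
# residual = X_cs - np.dot(pls.x_scores_, pls.x_loadings_.T)  # the "Err" term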
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
Xc = np.asarray(X)
Xc -= self.x_mean_
Xc /= self.x_std_
if Y is not None:
Yc = np.asarray(Y)
Yc -= self.y_mean_
Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
Xc = np.asarray(X)
Xc -= self.x_mean_
# No division by x_std_ here: ``self.coefs`` already folds in the
# 1 / x_std_ scaling (see ``fit``), matching the copy branch above.
Ypred = np.dot(Xc, self.coefs)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as the 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is, however, slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vector, where n_samples is the number of samples and
p is the number of predictors. X will be centered before any analysis.
Y : array-like of response, shape = [n_samples, q]
Training vector, where n_samples is the number of samples and
q is the number of response variables. Y will be centered before any
analysis.
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale X and Y.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy)
p = X.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpack to compute only the interesting components.
if self.n_components == C.shape[1]:
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
google/mirandum | alerts/twitchalerts/tests.py | 1 | 1768 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import TestCase
from django.utils import timezone
from twitchalerts.models import TwitchalertsUpdate, TwitchalertsEvent
from django.contrib.auth.models import User
from donations.models import Donation
from twitchalerts.support import run_twitchalerts
SAMPLE = {
"data":[
{
"donation_id":"80179029",
"created_at":"1438576556",
"currency":"USD",
"amount":"50",
"name":"Thomas",
"message":"nice!"
}
]
}
class InsertTwitchalerts(TestCase):
def setUp(self):
u = User(username="chris-ta")
u.save()
updater = TwitchalertsUpdate(access_token = "a", refresh_token="b", refresh_before = timezone.now(), type="twitchalerts", user=u)
updater.save()
self.updater = updater
def testInsertion(self):
def producer(*args):
return SAMPLE
self.assertEqual(Donation.objects.filter(type="twitchalerts").count(), 0)
run_twitchalerts(self.updater, producer=producer)
self.assertEqual(TwitchalertsEvent.objects.count(), 1)
self.assertEqual(Donation.objects.filter(type="twitchalerts").count(), 1)
| apache-2.0 |
arnaudsj/pybrain | pybrain/tools/datasets/mnist.py | 4 | 1906 | import itertools
import os
import scipy
import struct
from pybrain.datasets import SupervisedDataSet
def labels(filename):
fp = file(filename, 'rb')
magicnumber, length = struct.unpack('>ii', fp.read(8))
assert magicnumber == 2049, ("Not an MNIST label file: %i" % magicnumber)
for _ in xrange(length):
label, = struct.unpack('B', fp.read(1))
yield label
def images(filename):
fp = file(filename, 'rb')
chunk = fp.read(16)
magicnumber, length, numrows, numcols = struct.unpack('>iiii', chunk)
assert magicnumber == 2051, ("Not an MNIST image file: %i" % magicnumber)
imagesize = numrows * numcols
for _ in xrange(length):
imagestring = fp.read(imagesize)
image = struct.unpack('B' * imagesize, imagestring)
yield scipy.array(image)
def flaggedArrayByIndex(idx, length):
arr = scipy.zeros(length)
arr[idx] = 1.
return arr
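# Usage sketch (illustrative only): builds the one-hot target vector
# consumed by makeMnistDataSets below, e.g. digit label 3 out of 10 classes.
#
# assert list(flaggedArrayByIndex(3, 10)) == [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]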
def makeMnistDataSets(path):
"""Return a pair consisting of two datasets, the first being the training
and the second being the test dataset."""
test = SupervisedDataSet(28 * 28, 10)
test_image_file = os.path.join(path, 't10k-images-idx3-ubyte')
test_label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
test_images = images(test_image_file)
test_labels = (flaggedArrayByIndex(l, 10) for l in labels(test_label_file))
for image, label in itertools.izip(test_images, test_labels):
test.addSample(image, label)
train = SupervisedDataSet(28 * 28, 10)
train_image_file = os.path.join(path, 'train-images-idx3-ubyte')
train_label_file = os.path.join(path, 'train-labels-idx1-ubyte')
train_images = images(train_image_file)
train_labels = (flaggedArrayByIndex(l, 10) for l in labels(train_label_file))
for image, label in itertools.izip(train_images, train_labels):
train.addSample(image, label)
return train, test | bsd-3-clause |
bitifirefly/edx-platform | common/djangoapps/student/tests/test_parental_controls.py | 155 | 3850 | """Unit tests for parental controls."""
import datetime
from django.test import TestCase
from django.test.utils import override_settings
from student.models import UserProfile
from student.tests.factories import UserFactory
class ProfileParentalControlsTest(TestCase):
"""Unit tests for requires_parental_consent."""
password = "test"
def setUp(self):
super(ProfileParentalControlsTest, self).setUp()
self.user = UserFactory.create(password=self.password)
self.profile = UserProfile.objects.get(id=self.user.id)
def set_year_of_birth(self, year_of_birth):
"""
Helper method that creates a mock profile for the specified user.
"""
self.profile.year_of_birth = year_of_birth
self.profile.save()
def test_no_year_of_birth(self):
"""Verify the behavior for users with no specified year of birth."""
self.assertTrue(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(default_requires_consent=True))
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=False))
@override_settings(PARENTAL_CONSENT_AGE_LIMIT=None)
def test_no_parental_controls(self):
"""Verify the behavior for all users when parental controls are not enabled."""
self.assertFalse(self.profile.requires_parental_consent())
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=True))
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=False))
# Verify that even a child does not require parental consent
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 10)
self.assertFalse(self.profile.requires_parental_consent())
def test_adult_user(self):
"""Verify the behavior for an adult."""
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 20)
self.assertFalse(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(age_limit=21))
def test_child_user(self):
"""Verify the behavior for a child."""
current_year = datetime.datetime.now().year
# Verify for a child born 13 years ago
self.set_year_of_birth(current_year - 13)
self.assertTrue(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(date=datetime.date(current_year, 12, 31)))
self.assertFalse(self.profile.requires_parental_consent(date=datetime.date(current_year + 1, 1, 1)))
# Verify for a child born 14 years ago
self.set_year_of_birth(current_year - 14)
self.assertFalse(self.profile.requires_parental_consent())
self.assertFalse(self.profile.requires_parental_consent(date=datetime.date(current_year, 1, 1)))
def test_profile_image(self):
"""Verify that a profile's image obeys parental controls."""
# Verify that an image cannot be set for a user with no year of birth set
self.profile.profile_image_uploaded_at = datetime.datetime.now()
self.profile.save()
self.assertFalse(self.profile.has_profile_image)
# Verify that an image can be set for an adult user
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 20)
self.profile.profile_image_uploaded_at = datetime.datetime.now()
self.profile.save()
self.assertTrue(self.profile.has_profile_image)
# verify that a user's profile image is removed when they switch to requiring parental controls
self.set_year_of_birth(current_year - 10)
self.profile.save()
self.assertFalse(self.profile.has_profile_image)
| agpl-3.0 |
jcobham/barebox | Documentation/gen_commands.py | 10 | 5303 | #!/usr/bin/python
import errno
import os
import re
import sys
import hashlib
from collections import defaultdict
from pprint import pprint
# TODO: handle commands with the same name in multiple files
# TODO: handle #ifdefs
HELP_START = re.compile(r"""^BAREBOX_CMD_HELP_START\s*\((\w+)\)?\s*$""")
HELP_TEXT = re.compile(r"""^BAREBOX_CMD_HELP_TEXT\s*\("(.*?)"\)?\s*$""")
HELP_OPT = re.compile(r"""^BAREBOX_CMD_HELP_OPT\s*\("(.+?)",\s*"(.+?)"\)?\s*$""")
HELP_END = re.compile(r"""^BAREBOX_CMD_HELP_END\s*$""")
CMD_START = re.compile(r"""^BAREBOX_CMD_START\s*\((.+)\)\s*$""")
CMD_FUNC = re.compile(r"""^\s*\.cmd\s*=\s*(.+?),\s*$""")
CMD_DESC = re.compile(r"""^\s*BAREBOX_CMD_DESC\s*\("(.*?)"\)?\s*$""")
CMD_OPTS = re.compile(r"""^\s*BAREBOX_CMD_OPTS\s*\("(.*?)"\)?\s*$""")
CMD_GROUP = re.compile(r"""^\s*BAREBOX_CMD_GROUP\s*\((.+)\)\s*$""")
CMD_END = re.compile(r"""^BAREBOX_CMD_END\s*$""")
CONT = re.compile(r"""\s*"(.*?)"\s*\)?\s*$""")
CMDS = {}
def parse_c(name):
cmd = None
last = None
for line in file(name, 'r'):
x = HELP_START.match(line)
if x:
cmd = CMDS.setdefault(x.group(1), defaultdict(list))
cmd.setdefault("files", set()).add(name)
continue
x = CMD_START.match(line)
if x:
cmd = CMDS.setdefault(x.group(1), defaultdict(list))
cmd.setdefault("files", set()).add(name)
continue
if cmd is None:
continue
x = HELP_TEXT.match(line)
if x:
if 'h_opts' not in cmd:
last = cmd['h_pre']
else:
last = cmd['h_post']
last.append(x.group(1).decode("string_escape").strip())
continue
x = HELP_OPT.match(line)
if x:
last = cmd['h_opts']
last.append([
x.group(1).decode("string_escape"),
x.group(2).decode("string_escape")
])
continue
x = CMD_FUNC.match(line)
if x:
last = cmd['c_func']
last.append(x.group(1))
continue
x = CMD_DESC.match(line)
if x:
last = cmd['c_desc']
last.append(x.group(1).decode("string_escape"))
continue
x = CMD_OPTS.match(line)
if x:
last = cmd['c_opts']
last.append(x.group(1).decode("string_escape"))
continue
x = CMD_GROUP.match(line)
if x:
last = cmd['c_group']
last.append(x.group(1).split('_')[-1].lower())
continue
x = CONT.match(line)
if x:
if last is None:
raise Exception("Parse error in %s: %r" % (name, line))
if isinstance(last[-1], str):
last[-1] += x.group(1).decode("string_escape")
elif isinstance(last[-1], list):
last[-1][1] += x.group(1).decode("string_escape")
continue
x = HELP_END.match(line)
if x:
cmd = last = None
x = CMD_END.match(line)
if x:
cmd = last = None
def gen_rst(name, cmd):
out = []
out.append('.. index:: %s (command)' % name)
out.append('')
out.append('.. _command_%s:' % name)
out.append('')
if 'c_desc' in cmd:
out.append("%s - %s" % (name, ''.join(cmd['c_desc']).strip()))
else:
out.append("%s" % (name,))
out.append('='*len(out[-1]))
out.append('')
if 'c_opts' in cmd:
out.append('Usage')
out.append('^'*len(out[-1]))
out.append('``%s %s``' % (name, ''.join(cmd['c_opts']).strip()))
out.append('')
if 'h_pre' in cmd:
pre = cmd['h_pre']
if pre and pre[-1] == "Options:":
del pre[-1]
if pre and pre[-1] == "":
del pre[-1]
if pre:
out.append('Synopsis')
out.append('^'*len(out[-1]))
out.append('\n'.join(cmd['h_pre']).strip())
out.append('')
if 'h_opts' in cmd:
out.append('Options')
out.append('^'*len(out[-1]))
for o, d in cmd['h_opts']:
o = o.strip()
d = d.strip()
if o:
out.append('%s\n %s' % (o, d))
else:
out.append(' %s' % (d,))
out.append('')
if 'h_post' in cmd:
post = cmd['h_post']
if post and post[0] == "":
del post[0]
if post:
out.append('Description')
out.append('^'*len(out[-1]))
out.append('\n'.join(cmd['h_post']).strip())
out.append('')
out.append('.. generated from: %s' % ', '.join(cmd['files']))
if 'c_func' in cmd:
out.append('.. command function: %s' % ', '.join(cmd['c_func']))
return '\n'.join(out)
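# Rough shape of the reST emitted by gen_rst() for a command named "echo"
# (content shortened; strings are placeholders):
#
#   .. index:: echo (command)
#
#   .. _command_echo:
#
#   echo - echo args to console
#   ===========================
#
#   Usage
#   ^^^^^
#   ``echo [-ne] STRING``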
for root, dirs, files in os.walk(sys.argv[1]):
for name in files:
if name.endswith('.c'):
source = os.path.join(root, name)
parse_c(source)
for name in CMDS.keys():
CMDS[name] = dict(CMDS[name])
for name, cmd in CMDS.items():
#pprint({name: cmd})
rst = gen_rst(name, cmd)
group = cmd.get('c_group')
if group is None:
print >> sys.stderr, "gen_commands: warning: using default group 'misc' for command '%s'" % name
group = ['misc']
subdir = os.path.join(sys.argv[2], group[0])
try:
os.makedirs(subdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(subdir):
pass
else:
raise
target = os.path.join(subdir, name+'.rst')
    # Only write the new rst if it differs from the old one.
hash_old = hashlib.sha1()
try:
f = open(target, 'rb')
hash_old.update(f.read())
except:
pass
hash_new = hashlib.sha1()
hash_new.update(rst)
if hash_old.hexdigest() == hash_new.hexdigest():
continue
file(target, 'w').write(rst)
| gpl-2.0 |
fabianofranz/origin | Godeps/_workspace/src/k8s.io/kubernetes/examples/selenium/selenium-test.py | 173 | 1109 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def check_browser(browser):
driver = webdriver.Remote(
command_executor='http://selenium-hub:4444/wd/hub',
desired_capabilities=getattr(DesiredCapabilities, browser)
)
driver.get("http://google.com")
assert "google" in driver.page_source
driver.close()
print("Browser %s checks out!" % browser)
check_browser("FIREFOX")
check_browser("CHROME")
| apache-2.0 |
lakiw/cripts | cripts/core/mongo_tools.py | 1 | 14342 | from django.conf import settings
if settings.FILE_DB == settings.S3:
from cripts.core.s3_tools import get_file_s3
import gridfs
import pymongo
import magic
class MongoError(Exception):
"""
Generic MongoError exception.
"""
pass
# TODO: mongo_connector() and gridfs_connector() can probably be combined into
# one function.
# Setup standard connector to the MongoDB instance for use in any functions
def mongo_connector(collection, preference=settings.MONGO_READ_PREFERENCE):
"""
Connect to the mongo database if you need to use PyMongo directly and not
use MongoEngine.
:param collection: the collection to use.
:type collection: str
:param preference: PyMongo Read Preference for ReplicaSet/clustered DBs.
:type preference: str.
:returns: :class:`pymongo.MongoClient`,
:class:`cripts.core.mongo_tools.MongoError`
"""
try:
connection = pymongo.MongoClient("%s" % settings.MONGO_HOST,
settings.MONGO_PORT,
read_preference=preference,
ssl=settings.MONGO_SSL)
db = connection[settings.MONGO_DATABASE]
if settings.MONGO_USER:
db.authenticate(settings.MONGO_USER, settings.MONGO_PASSWORD)
return db[collection]
except pymongo.errors.ConnectionFailure as e:
raise MongoError("Error connecting to Mongo database: %s" % e)
except KeyError as e:
raise MongoError("Unknown database or collection: %s" % e)
except:
raise
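# Example usage (illustrative only; the collection name "sample.files" and the
# MD5 value are assumptions, not anything this module requires to exist):
#
#   coll = mongo_connector("sample.files")
#   doc = coll.find_one({'md5': 'd41d8cd98f00b204e9800998ecf8427e'})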
def gridfs_connector(collection, preference=settings.MONGO_READ_PREFERENCE):
"""
Connect to the mongo database if you need to use PyMongo directly and not
use MongoEngine. Used specifically for accessing GridFS.
:param collection: the collection to use.
:type collection: str
:param preference: PyMongo Read Preference for ReplicaSet/clustered DBs.
:type preference: str.
:returns: :class:`gridfs.GridFS`,
:class:`cripts.core.mongo_tools.MongoError`
"""
try:
connection = pymongo.MongoClient("%s" % settings.MONGO_HOST,
settings.MONGO_PORT,
read_preference=preference,
ssl=settings.MONGO_SSL)
db = connection[settings.MONGO_DATABASE]
if settings.MONGO_USER:
db.authenticate(settings.MONGO_USER, settings.MONGO_PASSWORD)
return gridfs.GridFS(db, collection)
except pymongo.errors.ConnectionFailure as e:
raise MongoError("Error connecting to Mongo database: %s" % e)
except KeyError as e:
raise MongoError("Unknown database: %s" % e)
except:
raise
def get_file(sample_md5, collection):
"""
Get a file from GridFS (or S3 if that's what you've configured).
:param sample_md5: The MD5 of the file to download.
:type sample_md5: str
:param collection: The collection to grab the file from.
:type collection: str
:returns: str
"""
# Workaround until pcap download uses pcap object
if settings.FILE_DB == settings.GRIDFS:
return get_file_gridfs(sample_md5, collection)
elif settings.FILE_DB == settings.S3:
objs = mongo_connector(collection)
obj = objs.find_one({"md5": sample_md5})
oid = obj['filedata']
return get_file_s3(oid,collection)
def put_file(m, data, collection):
"""
Add a file to storage.
:param m: The filename.
:type m: str
:param data: The data to add.
:type data: str
:param collection: The collection to grab the file from.
:type collection: str
:returns: str
"""
return put_file_gridfs(m, data, collection)
def get_file_gridfs(sample_md5, collection):
"""
Get a file from GridFS.
:param sample_md5: The MD5 of the file to download.
:type sample_md5: str
:param collection: The collection to grab the file from.
:type collection: str
:returns: str
"""
data = None
try:
fm = mongo_connector("%s.files" % collection)
objectid = fm.find_one({'md5': sample_md5}, {'_id': 1})['_id']
fs = gridfs_connector("%s" % collection)
data = fs.get(objectid).read()
except Exception:
return None
return data
def put_file_gridfs(m, data, collection):
"""
Add a file to storage.
:param m: The filename.
:type m: str
:param data: The data to add.
:type data: str
:param collection: The collection to grab the file from.
:type collection: str
:returns: str
"""
mimetype = magic.from_buffer(data, mime=True)
try:
fs = gridfs_connector("%s" % collection)
fs.put(data, content_type="%s" % mimetype, filename="%s" % m)
except Exception:
return None
return m
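# Example round trip through GridFS (illustrative; the collection name "sample"
# and the payload are assumptions, and hashlib is assumed to be imported):
#
#   put_file_gridfs("test.bin", "\x00\x01\x02", "sample")
#   md5 = hashlib.md5("\x00\x01\x02").hexdigest()
#   data = get_file_gridfs(md5, "sample")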
def delete_file(sample_md5, collection):
"""
delete_file allows you to delete a file from a gridfs collection specified
in the collection parameter.
    This will only remove the file object, not metadata from associated
    collections. For full deletion of metadata and file use delete_sample.
:param sample_md5: The MD5 of the file to delete.
:type sample_md5: str
:param collection: The collection to delete the file from.
:type collection: str
:returns: True, False, None
"""
fm = mongo_connector("%s.files" % collection)
sample = fm.find_one({'md5': sample_md5}, {'_id': 1})
success = None
if sample:
objectid = sample["_id"]
fs = gridfs_connector("%s" % collection)
try:
fs.delete(objectid)
return True
except:
return None
return success
####################################################
# NOTE: The following wrappers are only here for #
# legacy code and rare instances where we #
# cannot use MongoEngine to achieve our #
# goal. Please use these as a last resort! #
####################################################
# Wrapper for pymongo's find_one function
def mongo_find_one(collection, query, fields=None, skip=0, sort=None,
*args, **kwargs):
"""
Find one document from a collection matching the parameters.
:param collection: The collection to query.
:type collection: str
:param query: The query to find the document(s).
:type query: dict
:param fields: The fields to return for each document.
:type fields: dict
:param skip: How many documents to skip before returning.
:type skip: int
:param sort: How to sort the results.
:type sort: dict
:returns: PyMongo cursor.
"""
col = mongo_connector(collection)
return col.find_one(query, fields, skip=skip, sort=sort, *args, **kwargs)
# Wrapper for pymongo's find function
def mongo_find(collection, query, fields=None, skip=0, limit=0, sort=None,
count=False, *args, **kwargs):
"""
Find documents from a collection matching the parameters.
:param collection: The collection to query.
:type collection: str
:param query: The query to find the document(s).
:type query: dict
:param fields: The fields to return for each document.
:type fields: dict
:param skip: How many documents to skip before returning.
:type skip: int
:param limit: How many documents to return.
:type limit: int
:param sort: How to sort the results.
:type sort: dict
:param count: Only return a count of the documents.
:type count: boolean
:returns: PyMongo cursor, int
"""
col = mongo_connector(collection)
results = col.find(query, fields, skip=skip, limit=limit, sort=sort,
*args, **kwargs)
if not kwargs.get('timeout', True):
        # NOTE: pymongo collections have no close() method, so this attribute
        # access has no effect.
        col.close
if count:
return results.count()
else:
return results
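# Example usage (illustrative; the collection and field names are assumptions):
#
#   page = mongo_find("sample.files", {'length': {'$gt': 0}},
#                     fields={'md5': 1}, limit=10, sort=[('md5', 1)])
#   total = mongo_find("sample.files", {'length': {'$gt': 0}}, count=True)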
# Wrapper for pymongo's insert function
def mongo_insert(collection, doc_or_docs, username=None, safe=True, *args,
**kwargs):
"""
Insert documents into a collection.
:param collection: The collection to query.
:type collection: str
:param doc_or_docs: A single document or list of documents to insert.
:type doc_or_docs: dict or list
:param username: The user inserting these documents.
:type username: str
:param safe: Whether or not to insert in safe mode.
:type safe: boolean
:returns: dict with keys:
"success" (boolean),
"message" (list),
"object" (insertion response) if successful.
"""
col = mongo_connector(collection)
try:
col.insert(doc_or_docs, safe=safe, check_keys=True, *args, **kwargs)
return {'success':True, 'message':[], 'object':doc_or_docs}
except Exception, e:
# OperationFailure gets raised only if safe=True and there is some error
return {'success':False, 'message':[format_error(e)]}
# Wrapper for pymongo's update function
def mongo_update(collection, query, alter, username=None,
multi=True, upsert=False, safe=True, *args, **kwargs):
"""
Update documents in a collection.
:param collection: The collection to query.
:type collection: str
:param query: The query to use to find the documents to update.
:type query: dict
:param alter: How to update the documents.
:type alter: dict
:param username: The user updating the documents.
:type username: str
:param multi: Whether or not to update multiple documents.
:type multi: boolean
:param upsert: Insert documents into the collection if they are not found.
:type upsert: boolean
:param safe: Use safe mode while performing the update.
:type safe: boolean
:returns: dict with keys "success" (boolean) and "message" (list)
"""
col = mongo_connector(collection)
try:
r = col.update(query, alter, multi=multi, upsert=upsert,
check_keys=True, safe=safe, *args, **kwargs)
return {'success':True, 'message':[r]}
except Exception, e:
return {'success':False, 'message':[format_error(e)]}
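# Example usage (illustrative; the collection, field names and "some_md5" are
# placeholders):
#
#   res = mongo_update("sample.files", {'md5': some_md5},
#                      {'$set': {'reviewed': True}}, multi=False)
#   if not res['success']:
#       print res['message']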
# Wrapper for pymongo's save function
def mongo_save(collection, to_save, username=None, safe=True, *args, **kwargs):
"""
Save a document to a collection.
:param collection: The collection to query.
:type collection: str
:param to_save: The document to save.
:type to_save: dict
:param username: The user saving the document.
:type username: str
:param safe: Use safe mode while performing the save.
:type safe: boolean
:returns: dict with keys "success" (boolean) and "message" (list)
"""
col = mongo_connector(collection)
try:
r = col.save(to_save, check_keys=True, manipulate=True, safe=safe,
*args, **kwargs)
return {'success':True, 'message':[r]}
except Exception, e:
return {'success':False, 'message':[format_error(e)]}
# Wrapper for pymongo's find_and_modify function
def mongo_find_and_modify(collection, query, alter, fields=None, username=None,
sort={}, remove=False, new=False, upsert=False, *args,
**kwargs):
"""
Find documents from a collection matching the parameters, update them, and
return them.
:param collection: The collection to query.
:type collection: str
:param query: The query to use to find the documents to update.
:type query: dict
:param alter: How to update the documents.
:type alter: dict
:param fields: The fields to return for each document.
:type fields: dict
:param username: The user updating the documents.
:type username: str
:param sort: How to sort the results.
:type sort: dict
:param remove: Remove documents instead of update.
:type remove: boolean
    :param new: Return the updated documents instead of the original ones.
    :type new: boolean
:param upsert: Insert documents into the collection if they are not found.
:type upsert: boolean
:returns: dict with keys:
"success" (boolean),
"message" (list),
"object" (cursor) if successful.
"""
try:
col = mongo_connector(collection)
result = col.find_and_modify(query, update=alter, fields=fields,
remove=remove, new=new, upsert=upsert,
sort=sort, *args, **kwargs)
except Exception, e:
return {'success':False, 'message':[format_error(e)]}
try:
return {'success':True, 'message':[], 'object': result}
except Exception, e:
return {'success':True, 'message':[format_error(e)], 'object': result}
# Wrapper for pymongo's remove function
def mongo_remove(collection, query=None, username=None, safe=True, verify=False,
*args, **kwargs):
"""
    Remove documents from a collection matching the parameters.
:param collection: The collection to query.
:type collection: str
:param query: The query to use to find the documents to remove.
:type query: dict
:param username: The user removing the documents.
:type username: str
:param safe: Use safe mode while removing the documents.
:type safe: boolean
:param verify: Verify the removal.
:type verify: boolean
:returns: dict with keys "success" (boolean) and "message" list.
"""
if not query:
return {'success': False, 'message':['No query supplied to remove']}
else:
try:
col = mongo_connector(collection)
col.remove(query, safe=safe, *args, **kwargs)
if verify:
if mongo_find(collection, query, count=True):
return {'success':False,
'message':['Unknown error; unable to remove item']}
return {'success':True, 'message':[]}
except Exception, e:
return {'success':False, 'message':[format_error(e)]}
def format_error(e):
"""
wrapper for core/handlers format_error function.
Redefined here to avoid circular imports.
:param e: The error.
:type e: :class:`Exception`
:returns: str
"""
from cripts.core.handlers import format_error as fe
return fe(e)
| mit |
danmar/cppcheck | tools/ci.py | 8 | 2424 | #!/usr/bin/env python
# continuous integration
# build daily reports (doxygen, coverage, etc.)
import datetime
import time
import subprocess
import pexpect
import glob
import sys
# Upload file to sourceforge web server using scp
def upload(file_to_upload, destination):
try:
password = sys.argv[1]
child = pexpect.spawn(
'scp ' + file_to_upload + ' danielmarjamaki,cppcheck@web.sourceforge.net:' + destination)
# child.expect(
# 'danielmarjamaki,cppcheck@web.sourceforge.net\'s password:')
child.expect('Password:')
child.sendline(password)
child.interact()
except (IOError, OSError, pexpect.TIMEOUT):
pass
# git push
def gitpush():
try:
password = sys.argv[1]
child = pexpect.spawn('git push')
child.expect("Enter passphrase for key '/home/daniel/.ssh/id_rsa':")
child.sendline(password)
child.interact()
except (IOError, OSError, pexpect.TIMEOUT):
pass
def iconv(filename):
p = subprocess.Popen(['file', '-i', filename],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
comm = p.communicate()
if 'charset=iso-8859-1' in comm[0]:
subprocess.call(
["iconv", filename, "--from=ISO-8859-1", "--to=UTF-8", "-o", filename])
# Generate daily webreport
def generate_webreport():
for filename in glob.glob('*/*.cpp'):
iconv(filename)
subprocess.call(
["git", "commit", "-a", "-m", '"automatic conversion from iso-8859-1 formatting to utf-8"'])
gitpush()
subprocess.call(["rm", "-rf", "devinfo"])
subprocess.call(['nice', "./webreport.sh"])
upload('-r devinfo', 'htdocs/')
subprocess.call(["make", "clean"])
subprocess.call(["rm", "-rf", "devinfo"])
# Perform a git pull.
def gitpull():
try:
password = sys.argv[1]
child = pexpect.spawn('git pull')
child.expect("Enter passphrase for key '/home/daniel/.ssh/id_rsa':")
child.sendline(password)
child.expect('Already up-to-date.')
child.interact()
except (IOError, OSError, pexpect.TIMEOUT):
pass
except pexpect.EOF:
return True
return False
t0 = None
while True:
if datetime.date.today() != t0:
print("generate daily reports")
t0 = datetime.date.today()
gitpull()
generate_webreport()
time.sleep(60)
| gpl-3.0 |
jbarriosc/ACSUFRO | LGPL/CommonSoftware/acssim/src/Acssim/Servants/Representations/BaseRepresentation.py | 4 | 4088 | # @(#) $Id$
#
# Copyright (C) 2001
# Associated Universities, Inc. Washington DC, USA.
#
# Produced for the ALMA project
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Library General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more
# details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 675 Massachusetts Ave, Cambridge, MA 02139, USA. Correspondence concerning
# ALMA should be addressed as follows:
#
# Internet email: alma-sw-admin@nrao.edu
# "@(#) $Id$"
#
# who when what
# -------- ---------- -------------------------------------------------------
# dfugate 2003/12/09 Created.
#------------------------------------------------------------------------------
'''
Contains base class definition for simulated entries.
'''
#--REGULAR IMPORTS-------------------------------------------------------------
from copy import copy
from inspect import isfunction
#--CORBA STUBS-----------------------------------------------------------------
#--ACS Imports-----------------------------------------------------------------
from Acspy.Common.Log import getLogger
from Acssim.Goodies import getComponent
#--GLOBALS---------------------------------------------------------------------
__revision__ = "@(#) $Id$"
#------------------------------------------------------------------------------
class BaseRepresentation:
'''
Class BaseRepresentation is a baseclass which describes simulated components.
'''
#--------------------------------------------------------------------------
def __init__(self, compname):
'''
Constructor.
Parameters:
- compname is the name of the component to be simulated
Returns: Nothing
Raises: Nothing
'''
#save the name of the component we're looking for
self.compname = str(compname)
#our logger
self.logger = getLogger(compname)
#this dictionary contains descriptions of all simulated component methods
#and attributes
self.methods = {}
#reference to the component
self.comp_ref = None
#--------------------------------------------------------------------------
def getMethod(self, method_name):
'''
Returns a Python dictionary describing the given method or None if it
does not exist.
Parameters:
method_name - name of the method
'''
if self.methods.has_key(method_name):
return self.methods[method_name]
else:
return None
#--------------------------------------------------------------------------
def setMethod(self, method_name, in_dict):
'''
        Associates a method with a Python dictionary (or a callable) describing its simulated behavior.
'''
if not isfunction(in_dict):
code = copy(in_dict)
else:
code = in_dict
self.methods[method_name] = code
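    # Illustrative usage (the method name and dictionary keys below are
    # placeholders for this sketch, not keys this class requires):
    #
    #   rep = BaseRepresentation("SIMULATED_COMP")
    #   rep.setMethod("getStatus", {'Value': ['0'], 'Timeout': 0.2})
    #   desc = rep.getMethod("getStatus")   # returns the dict stored above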
#--------------------------------------------------------------------------
def __checkCompRef(self):
'''
Helper method does a sanity check on the component reference member.
This method is just around because an enduser might try to to define
simulated component behavior long before the simulated component has
ever been instantiated.
'''
# print "*******", "__checkCompRef: Trying to get component reference for", self.compname
if self.comp_ref == None:
self.comp_ref = getComponent(self.compname)
| lgpl-2.1 |
zdw/xos | xos/tosca/resources/controller.py | 4 | 1722 | # note: this module is named xossite.py instead of site.py due to conflict with
# /usr/lib/python2.7/site.py
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from core.models import User,Controller,Deployment
from xosresource import XOSResource
class XOSController(XOSResource):
provides = "tosca.nodes.Controller"
xos_model = Controller
copyin_props = ["backend_type", "version", "auth_url", "admin_user", "admin_password", "admin_tenant", "domain", "rabbit_host", "rabbit_user", "rabbit_password"]
def get_xos_args(self):
args = super(XOSController, self).get_xos_args()
deployment_name = self.get_requirement("tosca.relationships.ControllerDeployment")
if deployment_name:
args["deployment"] = self.get_xos_object(Deployment, name=deployment_name)
return args
def create(self):
xos_args = self.get_xos_args()
if not xos_args.get("deployment",None):
raise Exception("Controller must have a deployment")
controller = Controller(**xos_args)
controller.caller = self.user
controller.save()
self.info("Created Controller '%s'" % (str(controller), ))
self.postprocess(controller)
def delete(self, obj):
if obj.controllersite.exists():
self.info("Controller %s has active sites; skipping delete" % obj.name)
return
for sd in obj.sitedeployments.all():
if sd.nodes.exists():
self.info("Controller %s has active nodes; skipping delete" % obj.name)
return
super(XOSController, self).delete(obj)
| apache-2.0 |
fibbo/DIRAC | Interfaces/scripts/dirac-dms-pfn-accessURL.py | 15 | 1415 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-dms-pfn-accessURL
# Author : Stuart Paterson
########################################################################
"""
Retrieve an access URL for a PFN given a valid DIRAC SE
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... PFN SE' % Script.scriptName,
'Arguments:',
' PFN: Physical File Name or file containing PFNs',
' SE: Valid DIRAC SE' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
if len( args ) > 2:
print 'Only one PFN SE pair will be considered'
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
pfn = args[0]
seName = args[1]
try:
f = open( pfn, 'r' )
pfns = f.read().splitlines()
f.close()
except:
pfns = [pfn]
for pfn in pfns:
result = dirac.getPhysicalFileAccessURL( pfn, seName, printOutput = True )
if not result['OK']:
print 'ERROR: ', result['Message']
exitCode = 2
DIRAC.exit( exitCode )
| gpl-3.0 |
duyetdev/openerp-6.1.1 | openerp/addons/stock_planning/stock_planning.py | 9 | 47400 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from osv import osv, fields
import netsvc
from tools.translate import _
import logging
import decimal_precision as dp
_logger = logging.getLogger('mps')
def rounding(fl, round_value):
if not round_value:
return fl
return round(fl / round_value) * round_value
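# Worked example: rounding(5.3, 0.5) == round(5.3 / 0.5) * 0.5
#                                    == round(10.6) * 0.5 == 11 * 0.5 == 5.5,
# i.e. the quantity is snapped to the nearest multiple of round_value, while
# rounding(5.3, 0) returns 5.3 unchanged.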
# Periods have no company_id field as they can be shared across similar companies.
# If someone thinks differently it can be improved.
class stock_period(osv.osv):
_name = "stock.period"
_description = "stock period"
_order = "date_start"
_columns = {
'name': fields.char('Period Name', size=64, required=True),
'date_start': fields.datetime('Start Date', required=True),
'date_stop': fields.datetime('End Date', required=True),
'state': fields.selection([('draft','Draft'), ('open','Open'),('close','Close')], 'State'),
}
_defaults = {
'state': 'draft'
}
def button_open(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'open'})
return True
def button_close(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'close'})
return True
stock_period()
# Stock and Sales Forecast object. Previously stock_planning_sale_prevision.
# A lot of changes in 1.1
class stock_sale_forecast(osv.osv):
_name = "stock.sale.forecast"
_columns = {
'company_id':fields.many2one('res.company', 'Company', required=True),
'create_uid': fields.many2one('res.users', 'Responsible'),
'name': fields.char('Name', size=64, readonly=True, states={'draft': [('readonly',False)]}),
'user_id': fields.many2one('res.users', 'Created/Validated by',readonly=True, \
help='Shows who created this forecast, or who validated.'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, readonly=True, states={'draft': [('readonly',False)]}, \
help='Shows which warehouse this forecast concerns. '\
'If during stock planning you will need sales forecast for all warehouses choose any warehouse now.'),
'period_id': fields.many2one('stock.period', 'Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, \
help = 'Shows which period this forecast concerns.'),
'product_id': fields.many2one('product.product', 'Product', readonly=True, required=True, states={'draft':[('readonly',False)]}, \
help = 'Shows which product this forecast concerns.'),
'product_qty': fields.float('Forecast Quantity', digits_compute=dp.get_precision('Product UoM'), required=True, readonly=True, \
states={'draft':[('readonly',False)]}, help= 'Forecast Product quantity.'),
'product_amt': fields.float('Product Amount', readonly=True, states={'draft':[('readonly',False)]}, \
help='Forecast value which will be converted to Product Quantity according to prices.'),
'product_uom_categ': fields.many2one('product.uom.categ', 'Product UoM Category'), # Invisible field for product_uom domain
'product_uom': fields.many2one('product.uom', 'Product UoM', required=True, readonly=True, states={'draft':[('readonly',False)]}, \
help = "Unit of Measure used to show the quantities of stock calculation." \
"You can use units form default category or from second category (UoS category)."),
'product_uos_categ' : fields.many2one('product.uom.categ', 'Product UoS Category'), # Invisible field for product_uos domain
# Field used in onchange_uom to check what uom was before change and recalculate quantities according to old uom (active_uom) and new uom.
'active_uom': fields.many2one('product.uom', string = "Active UoM"),
'state': fields.selection([('draft','Draft'),('validated','Validated')],'State',readonly=True),
'analyzed_period1_id': fields.many2one('stock.period', 'Period1', readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_period2_id': fields.many2one('stock.period', 'Period2', readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_period3_id': fields.many2one('stock.period', 'Period3', readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_period4_id': fields.many2one('stock.period', 'Period4', readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_period5_id': fields.many2one('stock.period' , 'Period5', readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_user_id': fields.many2one('res.users', 'This User', required=False, readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_team_id': fields.many2one('crm.case.section', 'Sales Team', required=False, \
readonly=True, states={'draft':[('readonly',False)]},),
'analyzed_warehouse_id': fields.many2one('stock.warehouse' , 'This Warehouse', required=False, \
readonly=True, states={'draft':[('readonly',False)]}),
'analyze_company': fields.boolean('Per Company', readonly=True, states={'draft':[('readonly',False)]}, \
help = "Check this box to see the sales for whole company."),
'analyzed_period1_per_user': fields.float('This User Period1', readonly=True),
'analyzed_period2_per_user': fields.float('This User Period2', readonly=True),
'analyzed_period3_per_user': fields.float('This User Period3', readonly=True),
'analyzed_period4_per_user': fields.float('This User Period4', readonly=True),
'analyzed_period5_per_user': fields.float('This User Period5', readonly=True),
'analyzed_period1_per_dept': fields.float('This Dept Period1', readonly=True),
'analyzed_period2_per_dept': fields.float('This Dept Period2', readonly=True),
'analyzed_period3_per_dept': fields.float('This Dept Period3', readonly=True),
'analyzed_period4_per_dept': fields.float('This Dept Period4', readonly=True),
'analyzed_period5_per_dept': fields.float('This Dept Period5', readonly=True),
'analyzed_period1_per_warehouse': fields.float('This Warehouse Period1', readonly=True),
'analyzed_period2_per_warehouse': fields.float('This Warehouse Period2', readonly=True),
'analyzed_period3_per_warehouse': fields.float('This Warehouse Period3', readonly=True),
'analyzed_period4_per_warehouse': fields.float('This Warehouse Period4', readonly=True),
'analyzed_period5_per_warehouse': fields.float('This Warehouse Period5', readonly=True),
'analyzed_period1_per_company': fields.float('This Company Period1', readonly=True),
'analyzed_period2_per_company': fields.float('This Company Period2', readonly=True),
'analyzed_period3_per_company': fields.float('This Company Period3', readonly=True),
'analyzed_period4_per_company': fields.float('This Company Period4', readonly=True),
'analyzed_period5_per_company': fields.float('This Company Period5', readonly=True),
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.sale.forecast', context=c),
}
def action_validate(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'validated','user_id': uid})
return True
def unlink(self, cr, uid, ids, context=None):
forecasts = self.read(cr, uid, ids, ['state'])
unlink_ids = []
for t in forecasts:
if t['state'] in ('draft'):
unlink_ids.append(t['id'])
else:
raise osv.except_osv(_('Invalid action !'), _('Cannot delete a validated sales forecast!'))
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def onchange_company(self, cr, uid, ids, company_id=False):
result = {}
if not company_id:
return result
result['warehouse_id'] = False
result['analyzed_user_id'] = False
result['analyzed_team_id'] = False
result['analyzed_warehouse_id'] = False
return {'value': result}
def product_id_change(self, cr, uid, ids, product_id=False):
ret = {}
if product_id:
product_rec = self.pool.get('product.product').browse(cr, uid, product_id)
ret['product_uom'] = product_rec.uom_id.id
ret['product_uom_categ'] = product_rec.uom_id.category_id.id
ret['product_uos_categ'] = product_rec.uos_id and product_rec.uos_id.category_id.id or False
ret['active_uom'] = product_rec.uom_id.id
else:
ret['product_uom'] = False
ret['product_uom_categ'] = False
ret['product_uos_categ'] = False
res = {'value': ret}
return res
def onchange_uom(self, cr, uid, ids, product_uom=False, product_qty=0.0,
active_uom=False, product_id=False):
ret = {}
if product_uom and product_id:
coeff_uom2def = self._to_default_uom_factor(cr, uid, product_id, active_uom, {})
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, product_id, product_uom, {})
coeff = coeff_uom2def * coeff_def2uom
ret['product_qty'] = rounding(coeff * product_qty, round_value)
ret['active_uom'] = product_uom
return {'value': ret}
def product_amt_change(self, cr, uid, ids, product_amt=0.0, product_uom=False, product_id=False):
round_value = 1
qty = 0.0
if product_amt and product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id)
coeff_def2uom = 1
if (product_uom != product.uom_id.id):
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, product_id, product_uom, {})
qty = rounding(coeff_def2uom * product_amt/(product.product_tmpl_id.list_price), round_value)
res = {'value': {'product_qty': qty}}
return res
def _to_default_uom_factor(self, cr, uid, product_id, uom_id, context=None):
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product = product_obj.browse(cr, uid, product_id, context=context)
uom = uom_obj.browse(cr, uid, uom_id, context=context)
coef = uom.factor
if uom.category_id.id <> product.uom_id.category_id.id:
coef = coef * product.uos_coeff
return product.uom_id.factor / coef
def _from_default_uom_factor(self, cr, uid, product_id, uom_id, context=None):
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product = product_obj.browse(cr, uid, product_id, context=context)
uom = uom_obj.browse(cr, uid, uom_id, context=context)
res = uom.factor
if uom.category_id.id <> product.uom_id.category_id.id:
res = res * product.uos_coeff
return res / product.uom_id.factor, uom.rounding
def _sales_per_users(self, cr, uid, so, so_line, company, users):
cr.execute("SELECT sum(sol.product_uom_qty) FROM sale_order_line AS sol LEFT JOIN sale_order AS s ON (s.id = sol.order_id) " \
"WHERE (sol.id IN %s) AND (s.state NOT IN (\'draft\',\'cancel\')) AND (s.id IN %s) AND (s.company_id=%s) " \
"AND (s.user_id IN %s) " ,(tuple(so_line), tuple(so), company, tuple(users)))
ret = cr.fetchone()[0] or 0.0
return ret
def _sales_per_warehouse(self, cr, uid, so, so_line, company, shops):
cr.execute("SELECT sum(sol.product_uom_qty) FROM sale_order_line AS sol LEFT JOIN sale_order AS s ON (s.id = sol.order_id) " \
"WHERE (sol.id IN %s) AND (s.state NOT IN (\'draft\',\'cancel\')) AND (s.id IN %s)AND (s.company_id=%s) " \
"AND (s.shop_id IN %s)" ,(tuple(so_line), tuple(so), company, tuple(shops)))
ret = cr.fetchone()[0] or 0.0
return ret
def _sales_per_company(self, cr, uid, so, so_line, company):
cr.execute("SELECT sum(sol.product_uom_qty) FROM sale_order_line AS sol LEFT JOIN sale_order AS s ON (s.id = sol.order_id) " \
"WHERE (sol.id IN %s) AND (s.state NOT IN (\'draft\',\'cancel\')) AND (s.id IN %s) AND (s.company_id=%s)", (tuple(so_line), tuple(so), company))
ret = cr.fetchone()[0] or 0.0
return ret
def calculate_sales_history(self, cr, uid, ids, context, *args):
sales = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],]
for obj in self.browse(cr, uid, ids, context=context):
periods = obj.analyzed_period1_id, obj.analyzed_period2_id, obj.analyzed_period3_id, obj.analyzed_period4_id, obj.analyzed_period5_id
so_obj = self.pool.get('sale.order')
so_line_obj = self.pool.get('sale.order.line')
so_line_product_ids = so_line_obj.search(cr, uid, [('product_id','=', obj.product_id.id)], context = context)
if so_line_product_ids:
shops = users = None
if obj.analyzed_warehouse_id:
shops = self.pool.get('sale.shop').search(cr, uid,[('warehouse_id','=', obj.analyzed_warehouse_id.id)], context = context)
if obj.analyzed_team_id:
users = [u.id for u in obj.analyzed_team_id.member_ids]
factor, _ = self._from_default_uom_factor(cr, uid, obj.product_id.id, obj.product_uom.id, context=context)
for i, period in enumerate(periods):
if period:
so_period_ids = so_obj.search(cr, uid, [('date_order','>=',period.date_start),('date_order','<=',period.date_stop) ], context = context)
if so_period_ids:
if obj.analyzed_user_id:
sales[i][0] = self._sales_per_users(cr, uid, so_period_ids, so_line_product_ids, obj.company_id.id, [obj.analyzed_user_id.id])
sales[i][0] *= factor
if users:
sales[i][1] = self._sales_per_users(cr, uid, so_period_ids, so_line_product_ids, obj.company_id.id, users)
sales[i][1] *= factor
if shops:
sales[i][2] = self._sales_per_warehouse(cr, uid, so_period_ids, so_line_product_ids, obj.company_id.id, shops)
sales[i][2] *= factor
if obj.analyze_company:
sales[i][3] = self._sales_per_company(cr, uid, so_period_ids, so_line_product_ids, obj.company_id.id, )
sales[i][3] *= factor
self.write(cr, uid, ids, {
'analyzed_period1_per_user': sales[0][0],
'analyzed_period2_per_user': sales[1][0],
'analyzed_period3_per_user': sales[2][0],
'analyzed_period4_per_user': sales[3][0],
'analyzed_period5_per_user': sales[4][0],
'analyzed_period1_per_dept': sales[0][1],
'analyzed_period2_per_dept': sales[1][1],
'analyzed_period3_per_dept': sales[2][1],
'analyzed_period4_per_dept': sales[3][1],
'analyzed_period5_per_dept': sales[4][1],
'analyzed_period1_per_warehouse': sales[0][2],
'analyzed_period2_per_warehouse': sales[1][2],
'analyzed_period3_per_warehouse': sales[2][2],
'analyzed_period4_per_warehouse': sales[3][2],
'analyzed_period5_per_warehouse': sales[4][2],
'analyzed_period1_per_company': sales[0][3],
'analyzed_period2_per_company': sales[1][3],
'analyzed_period3_per_company': sales[2][3],
'analyzed_period4_per_company': sales[3][3],
'analyzed_period5_per_company': sales[4][3],
})
return True
stock_sale_forecast()
# The main Stock Planning object
# A lot of changes by contributor in ver 1.1
class stock_planning(osv.osv):
_name = "stock.planning"
def _get_in_out(self, cr, uid, val, date_start, date_stop, direction, done, context=None):
if context is None:
context = {}
product_obj = self.pool.get('product.product')
mapping = {'in': {
'field': "incoming_qty",
'adapter': lambda x: x,
},
'out': {
'field': "outgoing_qty",
'adapter': lambda x: -x,
},
}
context['from_date'] = date_start
context['to_date'] = date_stop
locations = [val.warehouse_id.lot_stock_id.id,]
if not val.stock_only:
locations.extend([val.warehouse_id.lot_input_id.id, val.warehouse_id.lot_output_id.id])
context['location'] = locations
context['compute_child'] = True
prod_id = val.product_id.id
if done:
context.update({ 'states':('done',), 'what':(direction,) })
prod_ids = [prod_id]
st = product_obj.get_product_available(cr, uid, prod_ids, context=context)
res = mapping[direction]['adapter'](st.get(prod_id,0.0))
else:
product = product_obj.read(cr, uid, prod_id,[], context)
product_qty = product[mapping[direction]['field']]
res = mapping[direction]['adapter'](product_qty)
return res
def _get_outgoing_before(self, cr, uid, val, date_start, date_stop, context=None):
cr.execute("SELECT sum(planning.planned_outgoing), planning.product_uom \
FROM stock_planning AS planning \
LEFT JOIN stock_period AS period \
ON (planning.period_id = period.id) \
WHERE (period.date_stop >= %s) AND (period.date_stop <= %s) \
AND (planning.product_id = %s) AND (planning.company_id = %s) \
GROUP BY planning.product_uom", \
(date_start, date_stop, val.product_id.id, val.company_id.id,))
planning_qtys = cr.fetchall()
res = self._to_default_uom(cr, uid, val, planning_qtys, context)
return res
def _to_default_uom(self, cr, uid, val, qtys, context=None):
res_qty = 0
if qtys:
for qty, prod_uom in qtys:
coef = self._to_default_uom_factor(cr, uid, val.product_id.id, prod_uom, context=context)
res_qty += qty * coef
return res_qty
def _to_form_uom(self, cr, uid, val, qtys, context=None):
res_qty = 0
if qtys:
for qty, prod_uom in qtys:
coef = self._to_default_uom_factor(cr, uid, val.product_id.id, prod_uom, context=context)
res_coef, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, val.product_uom.id, context=context)
coef = coef * res_coef
res_qty += rounding(qty * coef, round_value)
return res_qty
def _get_forecast(self, cr, uid, ids, field_names, arg, context=None):
res = {}
for val in self.browse(cr, uid, ids, context=context):
res[val.id] = {}
valid_part = val.confirmed_forecasts_only and " AND state = 'validated'" or ""
cr.execute('SELECT sum(product_qty), product_uom \
FROM stock_sale_forecast \
WHERE product_id = %s AND period_id = %s AND company_id = %s '+valid_part+ \
'GROUP BY product_uom', \
(val.product_id.id,val.period_id.id, val.company_id.id))
company_qtys = cr.fetchall()
res[val.id]['company_forecast'] = self._to_form_uom(cr, uid, val, company_qtys, context)
cr.execute('SELECT sum(product_qty), product_uom \
FROM stock_sale_forecast \
WHERE product_id = %s and period_id = %s AND warehouse_id = %s ' + valid_part + \
'GROUP BY product_uom', \
(val.product_id.id,val.period_id.id, val.warehouse_id.id))
warehouse_qtys = cr.fetchall()
res[val.id]['warehouse_forecast'] = self._to_form_uom(cr, uid, val, warehouse_qtys, context)
# res[val.id]['warehouse_forecast'] = rounding(res[val.id]['warehouse_forecast'], val.product_id.uom_id.rounding)
return res
def _get_stock_start(self, cr, uid, val, date, context=None):
if context is None:
context = {}
context['from_date'] = None
context['to_date'] = date
locations = [val.warehouse_id.lot_stock_id.id,]
if not val.stock_only:
locations.extend([val.warehouse_id.lot_input_id.id, val.warehouse_id.lot_output_id.id])
context['location'] = locations
context['compute_child'] = True
product_obj = self.pool.get('product.product').read(cr, uid,val.product_id.id,[], context)
res = product_obj['qty_available'] # value for stock_start
return res
def _get_past_future(self, cr, uid, ids, field_names, arg, context=None):
res = {}
for val in self.browse(cr, uid, ids, context=context):
if val.period_id.date_stop < time.strftime('%Y-%m-%d'):
res[val.id] = 'Past'
else:
res[val.id] = 'Future'
return res
def _get_op(self, cr, uid, ids, field_names, arg, context=None): # op = OrderPoint
res = {}
for val in self.browse(cr, uid, ids, context=context):
res[val.id]={}
cr.execute("SELECT product_min_qty, product_max_qty, product_uom \
FROM stock_warehouse_orderpoint \
WHERE warehouse_id = %s AND product_id = %s AND active = 'TRUE'", (val.warehouse_id.id, val.product_id.id))
ret = cr.fetchone() or [0.0,0.0,False]
coef = 1
round_value = 1
if ret[2]:
coef = self._to_default_uom_factor(cr, uid, val.product_id.id, ret[2], context)
res_coef, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, val.product_uom.id, context=context)
coef = coef * res_coef
res[val.id]['minimum_op'] = rounding(ret[0]*coef, round_value)
res[val.id]['maximum_op'] = rounding(ret[1]*coef, round_value)
return res
def onchange_company(self, cr, uid, ids, company_id=False):
result = {}
if company_id:
result['warehouse_id'] = False
return {'value': result}
def onchange_uom(self, cr, uid, ids, product_uom=False, product_id=False, active_uom=False,
planned_outgoing=0.0, to_procure=0.0):
ret = {}
if not product_uom:
return {}
if active_uom:
coeff_uom2def = self._to_default_uom_factor(cr, uid, product_id, active_uom, {})
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, product_id, product_uom, {})
coeff = coeff_uom2def * coeff_def2uom
ret['planned_outgoing'] = rounding(coeff * planned_outgoing, round_value)
ret['to_procure'] = rounding(coeff * to_procure, round_value)
ret['active_uom'] = product_uom
return {'value': ret}
_columns = {
'company_id': fields.many2one('res.company', 'Company', required = True),
'history': fields.text('Procurement History', readonly=True, help = "History of procurement or internal supply of this planning line."),
'state' : fields.selection([('draft','Draft'),('done','Done')],'State',readonly=True),
'period_id': fields.many2one('stock.period' , 'Period', required=True, \
help = 'Period for this planning. Requisition will be created for beginning of the period.', select=True),
'warehouse_id': fields.many2one('stock.warehouse','Warehouse', required=True),
'product_id': fields.many2one('product.product' , 'Product', required=True, help = 'Product which this planning is created for.'),
'product_uom_categ' : fields.many2one('product.uom.categ', 'Product UoM Category'), # Invisible field for product_uom domain
'product_uom': fields.many2one('product.uom', 'UoM', required=True, help = "Unit of Measure used to show the quantities of stock calculation." \
"You can use units from default category or from second category (UoS category)."),
        'product_uos_categ': fields.many2one('product.uom.categ', 'Product UoS Category'), # Invisible field for product_uos domain
# Field used in onchange_uom to check what uom was before change to recalculate quantities according to old uom (active_uom) and new uom.
'active_uom': fields.many2one('product.uom', string = "Active UoM"), # It works only in Forecast
'planned_outgoing': fields.float('Planned Out', required=True, \
help = 'Enter planned outgoing quantity from selected Warehouse during the selected Period of selected Product. '\
'To plan this value look at Confirmed Out or Sales Forecasts. This value should be equal or greater than Confirmed Out.'),
'company_forecast': fields.function(_get_forecast, string ='Company Forecast', multi = 'company', \
help = 'All sales forecasts for whole company (for all Warehouses) of selected Product during selected Period.'),
'warehouse_forecast': fields.function(_get_forecast, string ='Warehouse Forecast', multi = 'warehouse',\
help = 'All sales forecasts for selected Warehouse of selected Product during selected Period.'),
'stock_simulation': fields.float('Stock Simulation', readonly =True, \
help = 'Stock simulation at the end of selected Period.\n For current period it is: \n' \
'Initial Stock - Already Out + Already In - Expected Out + Incoming Left.\n' \
'For periods ahead it is: \nInitial Stock - Planned Out Before + Incoming Before - Planned Out + Planned In.'),
'incoming': fields.float('Confirmed In', readonly=True, \
help = 'Quantity of all confirmed incoming moves in calculated Period.'),
'outgoing': fields.float('Confirmed Out', readonly=True, \
help = 'Quantity of all confirmed outgoing moves in calculated Period.'),
'incoming_left': fields.float('Incoming Left', readonly=True, \
help = 'Quantity left to Planned incoming quantity. This is calculated difference between Planned In and Confirmed In. ' \
'For current period Already In is also calculated. This value is used to create procurement for lacking quantity.'),
'outgoing_left': fields.float('Expected Out', readonly=True, \
help = 'Quantity expected to go out in selected period besides Confirmed Out. As a difference between Planned Out and Confirmed Out. ' \
'For current period Already Out is also calculated'),
'to_procure': fields.float(string='Planned In', required=True, \
help = 'Enter quantity which (by your plan) should come in. Change this value and observe Stock simulation. ' \
'This value should be equal or greater than Confirmed In.'),
'line_time': fields.function(_get_past_future, type='char', string='Past/Future'),
'minimum_op': fields.function(_get_op, type='float', string = 'Minimum Rule', multi= 'minimum', \
help = 'Minimum quantity set in Minimum Stock Rules for this Warehouse'),
'maximum_op': fields.function(_get_op, type='float', string = 'Maximum Rule', multi= 'maximum', \
help = 'Maximum quantity set in Minimum Stock Rules for this Warehouse'),
'outgoing_before': fields.float('Planned Out Before', readonly=True, \
help= 'Planned Out in periods before calculated. '\
'Between start date of current period and one day before start of calculated period.'),
'incoming_before': fields.float('Incoming Before', readonly = True, \
help= 'Confirmed incoming in periods before calculated (Including Already In). '\
'Between start date of current period and one day before start of calculated period.'),
'stock_start': fields.float('Initial Stock', readonly=True, \
help= 'Stock quantity one day before current period.'),
'already_out': fields.float('Already Out', readonly=True, \
help= 'Quantity which is already dispatched out of this warehouse in current period.'),
'already_in': fields.float('Already In', readonly=True, \
help= 'Quantity which is already picked up to this warehouse in current period.'),
'stock_only': fields.boolean("Stock Location Only", help = "Check to calculate stock location of selected warehouse only. " \
"If not selected calculation is made for input, stock and output location of warehouse."),
"procure_to_stock": fields.boolean("Procure To Stock Location", help = "Check to make procurement to stock location of selected warehouse. " \
"If not selected procurement will be made into input location of warehouse."),
"confirmed_forecasts_only": fields.boolean("Validated Forecasts", help = "Check to take validated forecasts only. " \
"If not checked system takes validated and draft forecasts."),
'supply_warehouse_id': fields.many2one('stock.warehouse','Source Warehouse', help = "Warehouse used as source in supply pick move created by 'Supply from Another Warehouse'."),
"stock_supply_location": fields.boolean("Stock Supply Location", help = "Check to supply from Stock location of Supply Warehouse. " \
"If not checked supply will be made from Output location of Supply Warehouse. Used in 'Supply from Another Warehouse' with Supply Warehouse."),
}
_defaults = {
'state': 'draft' ,
'to_procure': 0.0,
'planned_outgoing': 0.0,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.planning', context=c),
}
_order = 'period_id'
def _to_default_uom_factor(self, cr, uid, product_id, uom_id, context=None):
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product = product_obj.browse(cr, uid, product_id, context=context)
uom = uom_obj.browse(cr, uid, uom_id, context=context)
coef = uom.factor
if uom.category_id.id != product.uom_id.category_id.id:
coef = coef * product.uos_coeff
return product.uom_id.factor / coef
def _from_default_uom_factor(self, cr, uid, product_id, uom_id, context=None):
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product = product_obj.browse(cr, uid, product_id, context=context)
uom = uom_obj.browse(cr, uid, uom_id, context=context)
res = uom.factor
if uom.category_id.id != product.uom_id.category_id.id:
res = res * product.uos_coeff
return res / product.uom_id.factor, uom.rounding
def calculate_planning(self, cr, uid, ids, context, *args):
one_second = relativedelta(seconds=1)
today = datetime.today()
current_date_beginning_c = datetime(today.year, today.month, today.day)
current_date_end_c = current_date_beginning_c + relativedelta(days=1, seconds=-1) # to get hour 23:59:59
current_date_beginning = current_date_beginning_c.strftime('%Y-%m-%d %H:%M:%S')
current_date_end = current_date_end_c.strftime('%Y-%m-%d %H:%M:%S')
_logger.debug("Calculate Planning: current date beg: %s and end: %s", current_date_beginning, current_date_end)
for val in self.browse(cr, uid, ids, context=context):
day = datetime.strptime(val.period_id.date_start, '%Y-%m-%d %H:%M:%S')
dbefore = datetime(day.year, day.month, day.day) - one_second
day_before_calculated_period = dbefore.strftime('%Y-%m-%d %H:%M:%S') # one day before start of calculated period
_logger.debug("Day before calculated period: %s ", day_before_calculated_period)
cr.execute("SELECT date_start \
FROM stock_period AS period \
LEFT JOIN stock_planning AS planning \
ON (planning.period_id = period.id) \
WHERE (period.date_stop >= %s) AND (period.date_start <= %s) AND \
                            planning.product_id = %s", (current_date_end, current_date_end, val.product_id.id,))
date = cr.fetchone()
start_date_current_period = date and date[0] or False
start_date_current_period = start_date_current_period or current_date_beginning
day = datetime.strptime(start_date_current_period, '%Y-%m-%d %H:%M:%S')
dbefore = datetime(day.year, day.month, day.day) - one_second
date_for_start = dbefore.strftime('%Y-%m-%d %H:%M:%S') # one day before current period
_logger.debug("Date for start: %s", date_for_start)
already_out = self._get_in_out(cr, uid, val, start_date_current_period, current_date_end, direction='out', done=True, context=context),
already_in = self._get_in_out(cr, uid, val, start_date_current_period, current_date_end, direction='in', done=True, context=context),
outgoing = self._get_in_out(cr, uid, val, val.period_id.date_start, val.period_id.date_stop, direction='out', done=False, context=context),
incoming = self._get_in_out(cr, uid, val, val.period_id.date_start, val.period_id.date_stop, direction='in', done=False, context=context),
outgoing_before = self._get_outgoing_before(cr, uid, val, start_date_current_period, day_before_calculated_period, context=context),
incoming_before = self._get_in_out(cr, uid, val, start_date_current_period, day_before_calculated_period, direction='in', done=False, context=context),
stock_start = self._get_stock_start(cr, uid, val, date_for_start, context=context),
if start_date_current_period == val.period_id.date_start: # current period is calculated
current = True
else:
current = False
factor, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, val.product_uom.id, context=context)
self.write(cr, uid, ids, {
'already_out': rounding(already_out[0]*factor,round_value),
'already_in': rounding(already_in[0]*factor,round_value),
'outgoing': rounding(outgoing[0]*factor,round_value),
'incoming': rounding(incoming[0]*factor,round_value),
'outgoing_before' : rounding(outgoing_before[0]*factor,round_value),
'incoming_before': rounding((incoming_before[0]+ (not current and already_in[0]))*factor,round_value),
'outgoing_left': rounding(val.planned_outgoing - (outgoing[0] + (current and already_out[0]))*factor,round_value),
'incoming_left': rounding(val.to_procure - (incoming[0] + (current and already_in[0]))*factor,round_value),
'stock_start': rounding(stock_start[0]*factor,round_value),
'stock_simulation': rounding(val.to_procure - val.planned_outgoing + (stock_start[0]+ incoming_before[0] - outgoing_before[0] \
+ (not current and already_in[0]))*factor,round_value),
})
return True
# The method below converts quantities and UoMs to the general OpenERP standard
# (UoM Qty, UoM, UoS Qty, UoS) from the stock_planning standard, where you have
# one Qty and one UoM (any from the UoM or UoS category).
# If the UoM is from the UoM category it is used as the UoM in the standard, and
# if the product has a UoS the UoS quantity will be calculated.
# If the UoM is from the UoS category it is recalculated to the product's basic
# UoS (in planning you can use any UoS from the UoS category) and the basic UoM
# quantity is calculated.
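# Worked example of the factor math above (numbers are illustrative): with
# product.uom_id.factor == 1.0 and a planning UoM whose factor is 0.5, in the
# same category,
#   _to_default_uom_factor(...)      returns 1.0 / 0.5 = 2.0
#   _from_default_uom_factor(...)[0] returns 0.5 / 1.0 = 0.5
# so a quantity converted UoM -> default -> UoM is multiplied by 2.0 * 0.5 = 1.0
# and survives the round trip (subject to the UoM's rounding step).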
def _qty_to_standard(self, cr, uid, val, context=None):
uos = False
uos_qty = 0.0
if val.product_uom.category_id.id == val.product_id.uom_id.category_id.id:
uom_qty = val.incoming_left
uom = val.product_uom.id
if val.product_id.uos_id:
uos = val.product_id.uos_id.id
coeff_uom2def = self._to_default_uom_factor(cr, uid, val.product_id.id, val.product_uom.id, {})
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, uos, {})
uos_qty = rounding(val.incoming_left * coeff_uom2def * coeff_def2uom, round_value)
elif val.product_uom.category_id.id == val.product_id.uos_id.category_id.id:
coeff_uom2def = self._to_default_uom_factor(cr, uid, val.product_id.id, val.product_uom.id, {})
uos = val.product_id.uos_id.id
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, uos, {})
uos_qty = rounding(val.incoming_left * coeff_uom2def * coeff_def2uom, round_value)
uom = val.product_id.uom_id.id
coeff_def2uom, round_value = self._from_default_uom_factor(cr, uid, val.product_id.id, uom, {})
uom_qty = rounding(val.incoming_left * coeff_uom2def * coeff_def2uom, round_value)
return uom_qty, uom, uos_qty, uos
def procure_incomming_left(self, cr, uid, ids, context, *args):
for obj in self.browse(cr, uid, ids, context=context):
if obj.incoming_left <= 0:
raise osv.except_osv(_('Error !'), _('Incoming Left must be greater than 0 !'))
uom_qty, uom, uos_qty, uos = self._qty_to_standard(cr, uid, obj, context)
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
proc_id = self.pool.get('procurement.order').create(cr, uid, {
'company_id' : obj.company_id.id,
'name': _('MPS planning for %s') %(obj.period_id.name),
'origin': _('MPS(%s) %s') %(user.login, obj.period_id.name),
'date_planned': obj.period_id.date_start,
'product_id': obj.product_id.id,
'product_qty': uom_qty,
'product_uom': uom,
'product_uos_qty': uos_qty,
'product_uos': uos,
'location_id': obj.procure_to_stock and obj.warehouse_id.lot_stock_id.id or obj.warehouse_id.lot_input_id.id,
'procure_method': 'make_to_order',
'note' : _(' Procurement created by MPS for user: %s Creation Date: %s \
\n For period: %s \
\n according to state: \
\n Warehouse Forecast: %s \
\n Initial Stock: %s \
\n Planned Out: %s Planned In: %s \
\n Already Out: %s Already In: %s \
\n Confirmed Out: %s Confirmed In: %s \
\n Planned Out Before: %s Confirmed In Before: %s \
\n Expected Out: %s Incoming Left: %s \
\n Stock Simulation: %s Minimum stock: %s') %(user.login, time.strftime('%Y-%m-%d %H:%M:%S'),
obj.period_id.name, obj.warehouse_forecast, obj.planned_outgoing, obj.stock_start, obj.to_procure,
obj.already_out, obj.already_in, obj.outgoing, obj.incoming, obj.outgoing_before, obj.incoming_before,
obj.outgoing_left, obj.incoming_left, obj.stock_simulation, obj.minimum_op)
}, context=context)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
self.calculate_planning(cr, uid, ids, context)
prev_text = obj.history or ""
self.write(cr, uid, ids, {
'history': _('%s Procurement (%s, %s) %s %s \n') % (prev_text, user.login, time.strftime('%Y.%m.%d %H:%M'),
obj.incoming_left, obj.product_uom.name)
})
return True
def internal_supply(self, cr, uid, ids, context, *args):
for obj in self.browse(cr, uid, ids, context=context):
if obj.incoming_left <= 0:
raise osv.except_osv(_('Error !'), _('Incoming Left must be greater than 0 !'))
if not obj.supply_warehouse_id:
raise osv.except_osv(_('Error !'), _('You must specify a Source Warehouse !'))
if obj.supply_warehouse_id.id == obj.warehouse_id.id:
raise osv.except_osv(_('Error !'), _('You must specify a Source Warehouse different than calculated (destination) Warehouse !'))
uom_qty, uom, uos_qty, uos = self._qty_to_standard(cr, uid, obj, context)
user = self.pool.get('res.users').browse(cr, uid, uid, context)
picking_id = self.pool.get('stock.picking').create(cr, uid, {
'origin': _('MPS(%s) %s') %(user.login, obj.period_id.name),
'type': 'internal',
'state': 'auto',
'date': obj.period_id.date_start,
'move_type': 'direct',
'invoice_state': 'none',
'company_id': obj.company_id.id,
'note': _('Pick created from MPS by user: %s Creation Date: %s \
\nFor period: %s according to state: \
\n Warehouse Forecast: %s \
\n Initial Stock: %s \
\n Planned Out: %s Planned In: %s \
\n Already Out: %s Already In: %s \
\n Confirmed Out: %s Confirmed In: %s \
\n Planned Out Before: %s Confirmed In Before: %s \
\n Expected Out: %s Incoming Left: %s \
\n Stock Simulation: %s Minimum stock: %s ')
% (user.login, time.strftime('%Y-%m-%d %H:%M:%S'), obj.period_id.name, obj.warehouse_forecast,
obj.stock_start, obj.planned_outgoing, obj.to_procure, obj.already_out, obj.already_in,
obj.outgoing, obj.incoming, obj.outgoing_before, obj.incoming_before,
obj.outgoing_left, obj.incoming_left, obj.stock_simulation, obj.minimum_op)
})
move_id = self.pool.get('stock.move').create(cr, uid, {
'name': _('MPS(%s) %s') %(user.login, obj.period_id.name),
'picking_id': picking_id,
'product_id': obj.product_id.id,
'date': obj.period_id.date_start,
'product_qty': uom_qty,
'product_uom': uom,
'product_uos_qty': uos_qty,
'product_uos': uos,
'location_id': obj.stock_supply_location and obj.supply_warehouse_id.lot_stock_id.id or \
obj.supply_warehouse_id.lot_output_id.id,
'location_dest_id': obj.procure_to_stock and obj.warehouse_id.lot_stock_id.id or \
obj.warehouse_id.lot_input_id.id,
'tracking_id': False,
'company_id': obj.company_id.id,
})
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
self.calculate_planning(cr, uid, ids, context)
prev_text = obj.history or ""
pick_name = self.pool.get('stock.picking').browse(cr, uid, picking_id).name
self.write(cr, uid, ids, {
'history': _('%s Pick List %s (%s, %s) %s %s \n') % (prev_text, pick_name, user.login, time.strftime('%Y.%m.%d %H:%M'),
obj.incoming_left, obj.product_uom.name)
})
return True
def product_id_change(self, cr, uid, ids, product_id):
ret = {}
if product_id:
product_rec = self.pool.get('product.product').browse(cr, uid, product_id)
ret['product_uom'] = product_rec.uom_id.id
ret['active_uom'] = product_rec.uom_id.id
ret['product_uom_categ'] = product_rec.uom_id.category_id.id
ret['product_uos_categ'] = product_rec.uos_id and product_rec.uos_id.category_id.id or False
else:
ret['product_uom'] = False
ret['product_uom_categ'] = False
ret['product_uos_categ'] = False
res = {'value': ret}
return res
stock_planning()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
piquadrat/django | tests/delete_regress/tests.py | 17 | 13296 | import datetime
from django.db import connection, models, transaction
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
self.conn2 = connection.copy()
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize',
award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints,
date=datetime.date.today())
PlayedWithNote.objects.create(played=played,
note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="carl@science.edu"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
msg = 'Cannot call delete() after .values() or .values_list()'
with self.assertRaisesMessage(TypeError, msg):
Image.objects.values().delete()
with self.assertRaisesMessage(TypeError, msg):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
# Both Login objs should have same description so that only the one
# having smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
# Assumed that l1 which is created first has smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
# cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
| bsd-3-clause |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/salts_lib/url_dispatcher.py | 21 | 3803 | import log_utils
class URL_Dispatcher:
def __init__(self):
self.func_registry = {}
self.args_registry = {}
self.kwargs_registry = {}
def register(self, mode, args=None, kwargs=None):
"""
Decorator function to register a function as a plugin:// url endpoint
mode: the mode value passed in the plugin:// url
args: a list of strings that are the positional arguments to expect
kwargs: a list of strings that are the keyword arguments to expect
* Positional argument must be in the order the function expect
* kwargs can be in any order
* kwargs without positional arguments are supported by passing in a kwargs but no args
* If there are no arguments at all, just "mode" can be specified
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
def decorator(f):
if mode in self.func_registry:
message = 'Error: %s already registered as %s' % (str(f), mode)
log_utils.log(message, log_utils.LOGERROR)
raise Exception(message)
# log_utils.log('registering function: |%s|->|%s|' % (mode,str(f)), xbmc.LOGDEBUG)
self.func_registry[mode.strip()] = f
self.args_registry[mode] = args
self.kwargs_registry[mode] = kwargs
# log_utils.log('registering args: |%s|-->(%s) and {%s}' % (mode, args, kwargs), xbmc.LOGDEBUG)
return f
return decorator
def dispatch(self, mode, queries):
"""
Dispatch function to execute function registered for the provided mode
mode: the string that the function was associated with
queries: a dictionary of the parameters to be passed to the called function
"""
if mode not in self.func_registry:
message = 'Error: Attempt to invoke unregistered mode |%s|' % (mode)
log_utils.log(message, log_utils.LOGERROR)
raise Exception(message)
args = []
kwargs = {}
unused_args = queries.copy()
if self.args_registry[mode]:
# positional arguments are all required
for arg in self.args_registry[mode]:
arg = arg.strip()
if arg in queries:
args.append(self.__coerce(queries[arg]))
del unused_args[arg]
else:
message = 'Error: mode |%s| requested argument |%s| but it was not provided.' % (mode, arg)
log_utils.log(message, log_utils.LOGERROR)
raise Exception(message)
if self.kwargs_registry[mode]:
# kwargs are optional
for arg in self.kwargs_registry[mode]:
arg = arg.strip()
if arg in queries:
kwargs[arg] = self.__coerce(queries[arg])
del unused_args[arg]
if 'mode' in unused_args: del unused_args['mode'] # delete mode last in case it's used by the target function
log_utils.log('Calling |%s| for mode |%s| with pos args |%s| and kwargs |%s|' % (self.func_registry[mode].__name__, mode, args, kwargs))
if unused_args: log_utils.log('Warning: Arguments |%s| were passed but unused by |%s| for mode |%s|' % (unused_args, self.func_registry[mode].__name__, mode))
self.func_registry[mode](*args, **kwargs)
# since all params are passed as strings, do any conversions necessary to get good types (e.g. boolean)
def __coerce(self, arg):
temp = arg.lower()
if temp == 'true':
return True
elif temp == 'false':
return False
elif temp == 'none':
return None
return arg
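# Illustrative usage sketch (not part of the original module); the mode name,
# arguments and handler below are hypothetical:
#
# url_dispatcher = URL_Dispatcher()
#
# @url_dispatcher.register('play', args=['video_id'], kwargs=['quality'])
# def play(video_id, quality=None):
#     pass  # resolve and play the stream here
#
# queries = {'mode': 'play', 'video_id': '42', 'quality': 'true'}
# url_dispatcher.dispatch('play', queries)  # calls play('42', quality=True)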
| gpl-2.0 |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/pyvideo.py | 76 | 2764 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class PyvideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pyvideo\.org/(?P<category>[^/]+)/(?P<id>[^/?#&.]+)'
_TESTS = [{
'url': 'http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html',
'info_dict': {
'id': 'become-a-logging-expert-in-30-minutes',
},
'playlist_count': 2,
}, {
'url': 'http://pyvideo.org/pygotham-2012/gloriajw-spotifywitherikbernhardsson182m4v.html',
'md5': '5fe1c7e0a8aa5570330784c847ff6d12',
'info_dict': {
'id': '2542',
'ext': 'm4v',
'title': 'Gloriajw-SpotifyWithErikBernhardsson182.m4v',
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
category = mobj.group('category')
video_id = mobj.group('id')
entries = []
data = self._download_json(
'https://raw.githubusercontent.com/pyvideo/data/master/%s/videos/%s.json'
% (category, video_id), video_id, fatal=False)
if data:
for video in data['videos']:
video_url = video.get('url')
if video_url:
if video.get('type') == 'youtube':
entries.append(self.url_result(video_url, 'Youtube'))
else:
entries.append({
'id': compat_str(data.get('id') or video_id),
'url': video_url,
'title': data['title'],
'description': data.get('description') or data.get('summary'),
'thumbnail': data.get('thumbnail_url'),
'duration': int_or_none(data.get('duration')),
})
else:
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
media_urls = self._search_regex(
r'(?s)Media URL:(.+?)</li>', webpage, 'media urls')
for m in re.finditer(
r'<a[^>]+href=(["\'])(?P<url>http.+?)\1', media_urls):
media_url = m.group('url')
if re.match(r'https?://www\.youtube\.com/watch\?v=.*', media_url):
entries.append(self.url_result(media_url, 'Youtube'))
else:
entries.append({
'id': video_id,
'url': media_url,
'title': title,
})
return self.playlist_result(entries, video_id)
| gpl-2.0 |
kkampardi/Plinth | plinth/menu.py | 8 | 3637 | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.urls import reverse, reverse_lazy
class Menu(object):
"""One menu item."""
def __init__(self, label="", icon="", url="#", order=50):
"""label is the text that is displayed on the menu.
icon is the icon to be displayed next to the label.
Choose from the Glyphicon set:
http://twitter.github.com/bootstrap/base-css.html#icons
url is the url location that will be activated when the menu
item is selected.
order is the numerical rank of this item within the menu.
Lower order items appear closest to the top/left of the menu.
By convention, we use the spectrum between 0 and 100 to rank
orders, but feel free to disregard that. If you need more
granularity, don't bother renumbering things. Feel free to
use fractional orders.
"""
self.label = label
self.icon = icon
self.url = url
self.order = order
# TODO: With an ordered dictionary for self.items we could access the
# items by their URL directly instead of searching for them each time,
# which we do currently with the 'get' method
self.items = []
def get(self, urlname, url_args=None, url_kwargs=None):
"""Return a menu item with given URL name."""
url = reverse(urlname, args=url_args, kwargs=url_kwargs)
for item in self.items:
if str(item.url) == url:
return item
raise KeyError('Menu item not found')
def sorted_items(self):
"""Return menu items in sorted order according to current locale."""
return sorted(self.items, key=lambda x: (x.order, x.label))
def add_urlname(self, name, icon, urlname, short_description="", order=50, url_args=None,
url_kwargs=None):
"""Add a named URL to the menu (via add_item).
url_args and url_kwargs will be passed on to Django reverse().
"""
if short_description:
label = '{0} ({1})'.format(short_description, name)
else:
label = name
url = reverse_lazy(urlname, args=url_args, kwargs=url_kwargs)
return self.add_item(label, icon, url, order)
def add_item(self, label, icon, url, order=50):
"""Create a new menu item with given parameters, add it to this menu and
return it.
"""
item = Menu(label=label, icon=icon, url=url, order=order)
self.items.append(item)
return item
def active_item(self, request):
"""Return the first active item (e.g. submenu) that is found."""
for item in self.items:
if request.path.startswith(str(item.url)):
return item
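# Illustrative usage sketch (not part of the original module); labels, icons
# and URLs below are hypothetical:
#
# menu = Menu()
# menu.add_item('Settings', 'glyphicon-cog', '/settings/', order=90)
# menu.add_item('Dashboard', 'glyphicon-home', '/dashboard/', order=10)
# [item.label for item in menu.sorted_items()]  # ['Dashboard', 'Settings']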
main_menu = Menu()
def init():
"""Create main menu and other essential menus."""
main_menu.add_urlname('', 'glyphicon-download-alt', 'apps')
main_menu.add_urlname('', 'glyphicon-cog', 'system')
| agpl-3.0 |
chancecoin/chancecoin | lib/exceptions.py | 1 | 1314 | #! /usr/bin/python3
class ConfigurationError (Exception):
pass
class DatabaseError (Exception):
pass
class VersionError (Exception):
pass
class ClientVersionError (VersionError):
pass
class DatabaseVersionError (VersionError):
pass
class TransactionError(Exception):
pass
class InputError(Exception):
pass
class BitcoindError (Exception):
pass
class BitcoindRPCError (BitcoindError):
pass
class ZeroMQError (Exception):
pass
class FeeError (Exception):
pass
class BalanceError (Exception):
pass
class QuantityError(Exception):
pass
class InvalidAddressError (Exception):
pass
class VersionByteError (InvalidAddressError):
pass
class Base58Error (InvalidAddressError):
pass
class InvalidBase58Error (Base58Error):
pass
class Base58ChecksumError (Base58Error):
pass
class AssetError (Exception):
pass
class AssetNameError (AssetError):
pass
class AssetIDError (AssetError):
pass
class MessageError (Exception):
pass
class BurnError (MessageError):
pass
class SendError (MessageError):
pass
class OrderError (MessageError):
pass
class BetError (MessageError):
pass
class BTCPayError (MessageError):
pass
class CancelError (MessageError):
pass
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit |
CraigHarris/gpdb | gpAux/extensions/gmock/test/gmock_leak_test.py | 779 | 4384 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
environ = gmock_test_utils.environ
SetEnvVar = gmock_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gmock_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
class GMockLeakTest(gmock_test_utils.TestCase):
def testCatchesLeakedMockByDefault(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL,
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL,
env=environ).exit_code)
def testDoesNotCatchLeakedMockWhenDisabled(self):
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
self.assertEquals(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks=0'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabled(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
['--gmock_catch_leaked_mocks=1'],
env=environ).exit_code)
def testCatchesMultipleLeakedMocks(self):
self.assertNotEqual(
0,
gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
['--gmock_catch_leaked_mocks'],
env=environ).exit_code)
if __name__ == '__main__':
gmock_test_utils.Main()
| apache-2.0 |
petewarden/tensorflow_makefile | tensorflow/python/lib/io/python_io.py | 9 | 1794 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Data IO (Python Functions)
A TFRecords file represents a sequence of (binary) strings. The format is not
random access, so it is suitable for streaming large amounts of data but not
suitable if fast sharding or other non-sequential access is desired.
@@TFRecordWriter
@@tf_record_iterator
- - -
### TFRecords Format Details
A TFRecords file contains a sequence of strings with CRC hashes. Each record
has the format
uint64 length
uint32 masked_crc32_of_length
byte data[length]
uint32 masked_crc32_of_data
and the records are concatenated together to produce the file. The CRC32s
are [described here](https://en.wikipedia.org/wiki/Cyclic_redundancy_check),
and the mask of a CRC is
masked_crc = ((crc >> 15) | (crc << 17)) + 0xa282ead8ul
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.lib.io.tf_record import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
| apache-2.0 |
matrixise/odoo | addons/hr_holidays/tests/__init__.py | 121 | 1159 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.hr_holidays.tests import test_holidays_flow
checks = [
test_holidays_flow,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gabfl/vault | setup.py | 1 | 1471 | from setuptools import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
setup(
name='pyvault',
version='2.4',
description='Python password manager',
long_description=long_description,
author='Gabriel Bordeaux',
author_email='pypi@gab.lc',
url='https://github.com/gabfl/vault',
license='MIT',
packages=['vault', 'vault.lib', 'vault.models',
'vault.modules', 'vault.views'],
package_dir={'vault': 'src'},
install_requires=['pycryptodome==3.9.9', 'pyperclip', 'tabulate',
'argparse', 'passwordgenerator', 'SQLAlchemy==1.3.22',
'sqlcipher3==0.4.5'], # external dependencies
entry_points={
'console_scripts': [
'vault = vault.vault:main',
],
},
classifiers=[ # see https://pypi.org/pypi?%3Aaction=list_classifiers
'Topic :: Security',
'Topic :: Security :: Cryptography',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
],
)
| mit |
ptisserand/ansible | lib/ansible/modules/network/cloudengine/ce_evpn_global.py | 65 | 6911 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_evpn_global
version_added: "2.4"
short_description: Manages global configuration of EVPN on HUAWEI CloudEngine switches.
description:
- Manages global configuration of EVPN on HUAWEI CloudEngine switches.
author: Zhijin Zhou (@CloudEngine-Ansible)
notes:
- Before configuring evpn_overlay_enable=disable, delete other EVPN configurations.
options:
evpn_overlay_enable:
description:
- Configure EVPN as the VXLAN control plane.
required: true
choices: ['enable','disable']
'''
EXAMPLES = '''
- name: evpn global module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVPN as the VXLAN control plane
ce_evpn_global:
evpn_overlay_enable: enable
provider: "{{ cli }}"
- name: Undo EVPN as the VXLAN control plane
ce_evpn_global:
evpn_overlay_enable: disable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"evpn_overlay_enable": "enable"
}
existing:
description: k/v pairs of existing attributes on the device
returned: always
type: dict
sample: {
"evpn_overlay_enable": "disable"
}
end_state:
description: k/v pairs of end attributes on the interface
returned: always
type: dict
sample: {
"evpn_overlay_enable": "enable"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"evpn-overlay enable",
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class EvpnGlobal(object):
"""Manange global configuration of EVPN"""
def __init__(self, argument_spec, ):
self.spec = argument_spec
self.module = None
self.init_module()
# EVPN global configuration parameters
self.overlay_enable = self.module.params['evpn_overlay_enable']
self.commands = list()
self.global_info = dict()
self.conf_exist = False
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init_module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def get_evpn_global_info(self):
""" get current EVPN global configration"""
self.global_info['evpnOverLay'] = 'disable'
flags = list()
exp = " | include evpn-overlay enable"
flags.append(exp)
config = get_config(self.module, flags)
if config:
self.global_info['evpnOverLay'] = 'enable'
def get_existing(self):
"""get existing config"""
self.existing = dict(
evpn_overlay_enable=self.global_info['evpnOverLay'])
def get_proposed(self):
"""get proposed config"""
self.proposed = dict(evpn_overlay_enable=self.overlay_enable)
def get_end_state(self):
"""get end config"""
self.get_evpn_global_info()
self.end_state = dict(
evpn_overlay_enable=self.global_info['evpnOverLay'])
def show_result(self):
""" show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def judge_if_config_exist(self):
""" judge whether configuration has existed"""
if self.overlay_enable == self.global_info['evpnOverLay']:
return True
return False
def config_evpn_global(self):
""" set global EVPN configuration"""
if not self.conf_exist:
if self.overlay_enable == 'enable':
self.cli_add_command('evpn-overlay enable')
else:
self.cli_add_command('evpn-overlay enable', True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def work(self):
"""excute task"""
self.get_evpn_global_info()
self.get_existing()
self.get_proposed()
self.conf_exist = self.judge_if_config_exist()
self.config_evpn_global()
self.get_end_state()
self.show_result()
def main():
"""main function entry"""
argument_spec = dict(
evpn_overlay_enable=dict(
required=True, type='str', choices=['enable', 'disable']),
)
argument_spec.update(ce_argument_spec)
evpn_global = EvpnGlobal(argument_spec)
evpn_global.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
aerickson/ansible | lib/ansible/utils/plugin_docs.py | 44 | 6418 | # (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import yaml
from collections import MutableMapping, MutableSet, MutableSequence
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.plugins import fragment_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# modules that are ok that they do not have documentation strings
BLACKLIST = {
'MODULE': frozenset(('async_wrapper',)),
'CACHE': frozenset(('base',)),
}
def add_fragments(doc, filename):
fragments = doc.get('extends_documentation_fragment', [])
if isinstance(fragments, string_types):
fragments = [fragments]
# Allow the module to specify a var other than DOCUMENTATION
# to pull the fragment from, using dot notation as a separator
for fragment_slug in fragments:
fragment_slug = fragment_slug.lower()
if '.' in fragment_slug:
fragment_name, fragment_var = fragment_slug.split('.', 1)
fragment_var = fragment_var.upper()
else:
fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
fragment_class = fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
if 'notes' in fragment:
notes = fragment.pop('notes')
if notes:
if 'notes' not in doc:
doc['notes'] = []
doc['notes'].extend(notes)
if 'options' not in fragment:
raise Exception("missing options in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))
for key, value in fragment.items():
if key in doc:
# assumes both structures have same type
if isinstance(doc[key], MutableMapping):
value.update(doc[key])
elif isinstance(doc[key], MutableSet):
value.add(doc[key])
elif isinstance(doc[key], MutableSequence):
value = sorted(frozenset(value + doc[key]))
else:
raise Exception("Attempt to extend a documentation fragement (%s) of unknown type: %s" % (fragment_name, filename))
doc[key] = value
def get_docstring(filename, verbose=False):
"""
Search for assignment of the DOCUMENTATION and EXAMPLES variables
in the given file.
Parse DOCUMENTATION from YAML and return the YAML doc or None
together with EXAMPLES, as plain text.
DOCUMENTATION can be extended using documentation fragments
loaded by the PluginLoader from the module_docs_fragments
directory.
"""
data = {
'doc': None,
'plainexamples': None,
'returndocs': None,
'metadata': None
}
string_to_vars = {
'DOCUMENTATION': 'doc',
'EXAMPLES': 'plainexamples',
'RETURN': 'returndocs',
'ANSIBLE_METADATA': 'metadata'
}
try:
M = ast.parse(''.join(open(filename)))
try:
display.debug('Attempt first docstring is yaml docs')
docstring = yaml.load(M.body[0].value.s)
for string in string_to_vars.keys():
if string in docstring:
data[string_to_vars[string]] = docstring[string]
display.debug('assigned :%s' % string_to_vars[string])
except Exception as e:
display.debug('failed docstring parsing: %s' % str(e))
if not data['doc']:
display.debug('Fallback to vars parsing')
for child in M.body:
if isinstance(child, ast.Assign):
for t in child.targets:
try:
theid = t.id
except AttributeError:
# skip errors can happen when trying to use the normal code
display.warning("Failed to assign id for %s on %s, skipping" % (t, filename))
continue
if theid in string_to_vars:
varkey = string_to_vars[theid]
if isinstance(child.value, ast.Dict):
data[varkey] = ast.literal_eval(child.value)
else:
if theid in ['DOCUMENTATION', 'ANSIBLE_METADATA']:
# string should be yaml
data[varkey] = AnsibleLoader(child.value.s, file_name=filename).get_single_data()
else:
# not yaml, should be a simple string
data[varkey] = child.value.s
display.debug('assigned :%s' % varkey)
# add fragments to documentation
if data['doc']:
add_fragments(data['doc'], filename)
# remove version
if data['metadata']:
for x in ('version', 'metadata_version'):
if x in data['metadata']:
del data['metadata'][x]
except:
display.error("unable to parse %s" % filename)
if verbose is True:
display.display("unable to parse %s" % filename)
raise
return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
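# Illustrative usage sketch (not part of the original module); the path below
# is a hypothetical module file:
# doc, plainexamples, returndocs, metadata = get_docstring('library/my_module.py')
# if doc:
#     print(doc.get('short_description'))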
| gpl-3.0 |
bird-house/twitcher | tests/functional/test_owsproxy_app.py | 2 | 1589 | """
Run OWSProxy tests with an external WPS.
Please start `Emu WPS <https://emu.readthedocs.io/en/latest/>`_ on port 5000:
http://localhost:5000/wps
"""
import pytest
from .base import FunctionalTest
class OWSProxyAppTest(FunctionalTest):
def setUp(self):
super(OWSProxyAppTest, self).setUp()
self.init_database()
self.init_store()
self.config.include('twitcher.owsproxy')
self.app = self.test_app()
@pytest.mark.online
def test_getcaps(self):
resp = self.app.get('/ows/proxy/wps?service=wps&request=getcapabilities')
assert resp.status_code == 200
assert resp.content_type == 'text/xml'
resp.mustcontain('</wps:Capabilities>')
@pytest.mark.online
def test_describeprocess(self):
resp = self.app.get(
'/ows/proxy/wps?service=wps&request=describeprocess&version=1.0.0&identifier=dummyprocess')
assert resp.status_code == 200
assert resp.content_type == 'text/xml'
resp.mustcontain('</wps:ProcessDescriptions>')
@pytest.mark.online
def test_execute_allowed(self):
access_token = self.create_token()
url = "/ows/proxy/wps_secured?service=wps&request=execute&version=1.0.0&identifier=hello&DataInputs=name=tux&access_token={}".format(access_token) # noqa
resp = self.app.get(url)
assert resp.status_code == 200
assert resp.content_type == 'text/xml'
print(resp.body)
resp.mustcontain(
'<wps:ProcessSucceeded>PyWPS Process Say Hello finished</wps:ProcessSucceeded>')
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.