repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
SteveHNH/ansible | test/units/modules/remote_management/oneview/test_oneview_network_set.py | 77 | 6332 | # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.compat.tests import unittest, mock
from hpe_test_utils import OneViewBaseTestCase
from oneview_module_loader import NetworkSetModule
FAKE_MSG_ERROR = 'Fake message error'
# Canonical "existing" network set resource returned by the mocked OneView client.
NETWORK_SET = dict(
    name='OneViewSDK Test Network Set',
    networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
)
NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
# Module params: state=present with data identical to NETWORK_SET (no-op update case).
PARAMS_FOR_PRESENT = dict(
    config='config.json',
    state='present',
    data=dict(name=NETWORK_SET['name'],
              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
)
# Module params: rename the set and add a second network referenced by display name
# ('Name of a Network' must be resolved to a URI by the module).
PARAMS_WITH_CHANGES = dict(
    config='config.json',
    state='present',
    data=dict(name=NETWORK_SET['name'],
              newName=NETWORK_SET['name'] + " - Renamed",
              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
)
# Module params: state=absent for the canonical set.
PARAMS_FOR_ABSENT = dict(
    config='config.json',
    state='absent',
    data=dict(name=NETWORK_SET['name'])
)
class NetworkSetModuleSpec(unittest.TestCase,
                           OneViewBaseTestCase):
    """
    OneViewBaseTestCase has common tests for class constructor and main function,
    also provides the mocks used in this test case.
    """

    def setUp(self):
        # Wire the shared OneView client mocks and keep shortcuts to the
        # resource clients exercised by NetworkSetModule.
        self.configure_mocks(self, NetworkSetModule)
        self.resource = self.mock_ov_client.network_sets
        self.ethernet_network_client = self.mock_ov_client.ethernet_networks

    def test_should_create_new_network_set(self):
        # Resource does not exist yet -> module must create it.
        self.resource.get_by.return_value = []
        self.resource.create.return_value = NETWORK_SET
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT

        NetworkSetModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_CREATED,
            ansible_facts=dict(network_set=NETWORK_SET)
        )

    def test_should_not_update_when_data_is_equals(self):
        # Resource exists and already matches the requested data -> no change.
        self.resource.get_by.return_value = [NETWORK_SET]
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT

        NetworkSetModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=NetworkSetModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(network_set=NETWORK_SET)
        )

    def test_update_when_data_has_modified_attributes(self):
        data_merged = dict(name=NETWORK_SET['name'] + " - Renamed",
                           networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
                                        '/rest/ethernet-networks/ddd-eee-fff'])
        # First get_by (current name) finds the set; second (new name) finds none.
        self.resource.get_by.side_effect = [NETWORK_SET], []
        self.resource.update.return_value = data_merged
        # The network referenced by display name resolves to this URI.
        self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
        self.mock_ansible_module.params = PARAMS_WITH_CHANGES

        NetworkSetModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_UPDATED,
            ansible_facts=dict(network_set=data_merged)
        )

    def test_should_raise_exception_when_ethernet_network_not_found(self):
        self.resource.get_by.side_effect = [NETWORK_SET], []
        # Name-based network lookup yields nothing -> the module must fail.
        self.ethernet_network_client.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_WITH_CHANGES

        NetworkSetModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(
            exception=mock.ANY,
            msg=NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND + "Name of a Network"
        )

    def test_should_remove_network(self):
        self.resource.get_by.return_value = [NETWORK_SET]
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT

        NetworkSetModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_DELETED
        )

    def test_should_do_nothing_when_network_set_not_exist(self):
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT

        NetworkSetModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=NetworkSetModule.MSG_ALREADY_ABSENT
        )

    def test_update_scopes_when_different(self):
        # Build isolated params: the original test assigned into
        # PARAMS_FOR_PRESENT['data'] through a shallow copy, leaking
        # 'scopeUris' into every other test (run-order dependent behavior).
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data'] = dict(PARAMS_FOR_PRESENT['data'],
                                       scopeUris=['test'])
        self.mock_ansible_module.params = params_to_scope
        # Likewise avoid mutating the shared NETWORK_SET fixture.
        resource_data = dict(NETWORK_SET,
                             scopeUris=['fake'],
                             uri='rest/network-sets/fake')
        self.resource.get_by.return_value = [resource_data]
        patch_return = dict(resource_data, scopeUris=['test'])
        self.resource.patch.return_value = patch_return

        NetworkSetModule().run()

        self.resource.patch.assert_called_once_with('rest/network-sets/fake',
                                                    operation='replace',
                                                    path='/scopeUris',
                                                    value=['test'])
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            ansible_facts=dict(network_set=patch_return),
            msg=NetworkSetModule.MSG_UPDATED
        )

    def test_should_do_nothing_when_scopes_are_the_same(self):
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data'] = dict(PARAMS_FOR_PRESENT['data'],
                                       scopeUris=['test'])
        self.mock_ansible_module.params = params_to_scope
        resource_data = dict(NETWORK_SET, scopeUris=['test'])
        self.resource.get_by.return_value = [resource_data]

        NetworkSetModule().run()

        # The original called `self.resource.patch.not_been_called()`, which is
        # not a Mock assertion at all -- it silently created and invoked a child
        # mock, so a spurious patch() call could never fail the test.
        self.resource.patch.assert_not_called()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(network_set=resource_data),
            msg=NetworkSetModule.MSG_ALREADY_PRESENT
        )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
thaumos/ansible | lib/ansible/plugins/httpapi/splunk.py | 37 | 2162 | # (c) 2019 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Security Automation Team
httpapi : splunk
short_description: HttpApi Plugin for Splunk
description:
- This HttpApi plugin provides methods to connect to Splunk over a
HTTP(S)-based api.
version_added: "2.8"
"""
import json
from ansible.module_utils.basic import to_text
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.connection import ConnectionError
# Headers sent with every request; the Splunk REST API speaks JSON.
BASE_HEADERS = {
    'Content-Type': 'application/json',
}
class HttpApi(HttpApiBase):
    """HttpApi connection plugin implementation for the Splunk REST API."""

    def send_request(self, request_method, path, payload=None):
        """Send a JSON request to Splunk and return ``(status_code, body)``."""
        request_body = json.dumps(payload) if payload else '{}'
        try:
            self._display_request(request_method)
            response, response_data = self.connection.send(
                path,
                request_body,
                method=request_method,
                headers=BASE_HEADERS,
                force_basic_auth=True,
            )
            response_text = self._get_response_value(response_data)
            return response.getcode(), self._response_to_json(response_text)
        except AnsibleConnectionFailure as e:
            # Map connection-level failures onto HTTP-like status codes.
            if to_text('401') in to_text(e):
                return 401, 'Authentication failure'
            return 404, 'Object not found'
        except HTTPError as e:
            # Server produced an HTTP error: surface its code and JSON body.
            return e.code, json.loads(e.read())

    def _display_request(self, request_method):
        # Emit the request at -vvvv verbosity so failed calls can be traced.
        self.connection.queue_message(
            'vvvv',
            'Web Services: %s %s' % (request_method, self.connection._url))

    def _get_response_value(self, response_data):
        """Decode the raw response buffer to text."""
        return to_text(response_data.getvalue())

    def _response_to_json(self, response_text):
        """Parse *response_text* as JSON; raise ConnectionError when invalid."""
        try:
            return json.loads(response_text) if response_text else {}
        # JSONDecodeError only available on Python 3.5+
        except ValueError:
            raise ConnectionError('Invalid JSON response: %s' % response_text)
| gpl-3.0 |
syaiful6/django | django/core/mail/__init__.py | 347 | 4697 | """
Tools for sending email.
"""
from __future__ import unicode_literals
from django.conf import settings
# Imported for backwards compatibility and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.message import (
DEFAULT_ATTACHMENT_MIME_TYPE, BadHeaderError, EmailMessage,
EmailMultiAlternatives, SafeMIMEMultipart, SafeMIMEText,
forbid_multi_line_headers, make_msgid,
)
from django.core.mail.utils import DNS_NAME, CachedDnsName
from django.utils.module_loading import import_string
__all__ = [
'CachedDnsName', 'DNS_NAME', 'EmailMessage', 'EmailMultiAlternatives',
'SafeMIMEText', 'SafeMIMEMultipart', 'DEFAULT_ATTACHMENT_MIME_TYPE',
'make_msgid', 'BadHeaderError', 'forbid_multi_line_headers',
'get_connection', 'send_mail', 'send_mass_mail', 'mail_admins',
'mail_managers',
]
def get_connection(backend=None, fail_silently=False, **kwds):
    """Load an email backend and return an instance of it.

    If backend is None (default) settings.EMAIL_BACKEND is used.

    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    backend_path = backend or settings.EMAIL_BACKEND
    backend_class = import_string(backend_path)
    return backend_class(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None, html_message=None):
    """
    Easy wrapper for sending a single message to a recipient list. All members
    of the recipient list will see the other recipients in the 'To' field.

    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently,
    )
    email = EmailMultiAlternatives(subject, message, from_email,
                                   recipient_list, connection=connection)
    if html_message:
        email.attach_alternative(html_message, 'text/html')
    return email.send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of emails sent.

    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently,
    )
    emails = []
    for subject, message, sender, recipient in datatuple:
        emails.append(EmailMessage(subject, message, sender, recipient,
                                   connection=connection))
    return connection.send_messages(emails)
def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    if not settings.ADMINS:
        return
    # ADMINS entries are (name, address) pairs; only the address is used.
    recipients = [admin[1] for admin in settings.ADMINS]
    email = EmailMultiAlternatives(
        '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message,
        settings.SERVER_EMAIL, recipients, connection=connection)
    if html_message:
        email.attach_alternative(html_message, 'text/html')
    email.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    if not settings.MANAGERS:
        return
    # MANAGERS entries are (name, address) pairs; only the address is used.
    recipients = [manager[1] for manager in settings.MANAGERS]
    email = EmailMultiAlternatives(
        '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message,
        settings.SERVER_EMAIL, recipients, connection=connection)
    if html_message:
        email.attach_alternative(html_message, 'text/html')
    email.send(fail_silently=fail_silently)
| bsd-3-clause |
kevintaw/django | tests/migrate_signals/tests.py | 324 | 3585 | from django.apps import apps
from django.core import management
from django.db.models import signals
from django.test import TestCase, override_settings
from django.utils import six
# The app config the receivers below are registered for.
APP_CONFIG = apps.get_app_config('migrate_signals')
# Keyword arguments expected in every pre_migrate signal dispatch.
PRE_MIGRATE_ARGS = ['app_config', 'verbosity', 'interactive', 'using']
# The single database alias the one-time receiver keys on.
MIGRATE_DATABASE = 'default'
MIGRATE_VERBOSITY = 1
MIGRATE_INTERACTIVE = False
class PreMigrateReceiver(object):
    """Signal receiver that records how often, and with what, it was called."""

    def __init__(self):
        self.call_counter = 0  # number of times the receiver has fired
        self.call_args = None  # kwargs of the most recent dispatch

    def __call__(self, signal, sender, **kwargs):
        self.call_counter += 1
        self.call_args = kwargs
class OneTimeReceiver(object):
    """
    Special receiver to handle the fact that the test runner calls migrate for
    several databases, and several times for some of them.
    """
    def __init__(self):
        # Count of accepted (default-database) dispatches and their kwargs.
        self.call_counter = 0
        self.call_args = None
    def __call__(self, signal, sender, **kwargs):
        # Although test runner calls migrate for several databases,
        # testing for only one of them is quite sufficient.
        if kwargs['using'] == MIGRATE_DATABASE:
            self.call_counter = self.call_counter + 1
            self.call_args = kwargs
            # we need to test only one call of migrate, so disconnect the
            # module-level receiver after the first matching dispatch.
            signals.pre_migrate.disconnect(pre_migrate_receiver, sender=APP_CONFIG)
# We connect the receiver here, and not in unit test code, because we need to
# connect it before the test runner creates the database. That is, the
# sequence of actions is:
#
#   1. Test runner imports this module.
#   2. We connect the receiver.
#   3. Test runner calls migrate to create the default database.
#   4. Test runner executes our unit test code.
pre_migrate_receiver = OneTimeReceiver()
signals.pre_migrate.connect(pre_migrate_receiver, sender=APP_CONFIG)
class MigrateSignalTests(TestCase):
    """Verify when, and with which arguments, pre_migrate is dispatched."""
    # Restrict the test environment to this app only.
    available_apps = ['migrate_signals']
    def test_pre_migrate_call_time(self):
        # The module-level OneTimeReceiver disconnects itself after its first
        # matching dispatch, so exactly one call must have been recorded.
        self.assertEqual(pre_migrate_receiver.call_counter, 1)
    def test_pre_migrate_args(self):
        # A freshly connected receiver sees one dispatch carrying exactly
        # the documented kwargs.
        r = PreMigrateReceiver()
        signals.pre_migrate.connect(r, sender=APP_CONFIG)
        management.call_command('migrate', database=MIGRATE_DATABASE,
            verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE,
            stdout=six.StringIO())
        args = r.call_args
        self.assertEqual(r.call_counter, 1)
        self.assertEqual(set(args), set(PRE_MIGRATE_ARGS))
        self.assertEqual(args['app_config'], APP_CONFIG)
        self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY)
        self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE)
        self.assertEqual(args['using'], 'default')
    @override_settings(MIGRATION_MODULES={'migrate_signals': 'migrate_signals.custom_migrations'})
    def test_pre_migrate_migrations_only(self):
        """
        If all apps have migrations, pre_migrate should be sent.
        """
        r = PreMigrateReceiver()
        signals.pre_migrate.connect(r, sender=APP_CONFIG)
        stdout = six.StringIO()
        management.call_command('migrate', database=MIGRATE_DATABASE,
            verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE,
            stdout=stdout)
        args = r.call_args
        self.assertEqual(r.call_counter, 1)
        self.assertEqual(set(args), set(PRE_MIGRATE_ARGS))
        self.assertEqual(args['app_config'], APP_CONFIG)
        self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY)
        self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE)
        self.assertEqual(args['using'], 'default')
| bsd-3-clause |
luhanhan/horizon | openstack_dashboard/api/fwaas.py | 36 | 10587 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import

from collections import OrderedDict

from django.utils.datastructures import SortedDict

from horizon.utils import memoized

from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Rule(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall rule."""

    def get_dict(self):
        # Expose the id under the 'rule_id' key as well.
        d = self._apidict
        d['rule_id'] = d['id']
        return d


class Policy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall policy."""

    def get_dict(self):
        # Expose the id under the 'policy_id' key as well.
        d = self._apidict
        d['policy_id'] = d['id']
        return d


class Firewall(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron firewall."""

    def __init__(self, apiresource):
        # Surface admin_state_up as a human-readable 'admin_state' field.
        if apiresource['admin_state_up']:
            apiresource['admin_state'] = 'UP'
        else:
            apiresource['admin_state'] = 'DOWN'
        super(Firewall, self).__init__(apiresource)

    def get_dict(self):
        # Expose the id under the 'firewall_id' key as well.
        d = self._apidict
        d['firewall_id'] = d['id']
        return d
def rule_create(request, **kwargs):
    """Create a firewall rule

    :param request: request context
    :param name: name for rule
    :param description: description for rule
    :param protocol: protocol for rule
    :param action: action for rule
    :param source_ip_address: source IP address or subnet
    :param source_port: integer in [1, 65535] or range in a:b
    :param destination_ip_address: destination IP address or subnet
    :param destination_port: integer in [1, 65535] or range in a:b
    :param shared: boolean (default false)
    :param enabled: boolean (default true)
    :return: Rule object
    """
    body = {'firewall_rule': kwargs}
    rule = neutronclient(request).create_firewall_rule(
        body).get('firewall_rule')
    return Rule(rule)


def rule_list(request, **kwargs):
    """List firewall rules with each rule's owning policy expanded."""
    return _rule_list(request, expand_policy=True, **kwargs)


def rule_list_for_tenant(request, tenant_id, **kwargs):
    """Return a rule list available for the tenant.

    The list contains rules owned by the tenant and shared rules.
    This is required because Neutron returns all resources including
    all tenants if a user has admin role.
    """
    rules = rule_list(request, tenant_id=tenant_id, shared=False, **kwargs)
    shared_rules = rule_list(request, shared=True, **kwargs)
    return rules + shared_rules


def _rule_list(request, expand_policy, **kwargs):
    """List rules; when expand_policy, attach each rule's Policy object."""
    rules = neutronclient(request).list_firewall_rules(
        **kwargs).get('firewall_rules')
    if expand_policy and rules:
        policies = _policy_list(request, expand_rule=False)
        # OrderedDict replaces django.utils.datastructures.SortedDict,
        # which was deprecated in Django 1.7 and removed in Django 1.9.
        policy_dict = OrderedDict((p.id, p) for p in policies)
        for rule in rules:
            rule['policy'] = policy_dict.get(rule['firewall_policy_id'])
    return [Rule(r) for r in rules]


def rule_get(request, rule_id):
    """Get a single firewall rule with its owning policy expanded."""
    return _rule_get(request, rule_id, expand_policy=True)


def _rule_get(request, rule_id, expand_policy):
    rule = neutronclient(request).show_firewall_rule(
        rule_id).get('firewall_rule')
    if expand_policy:
        # A rule may not be associated with any policy yet.
        if rule['firewall_policy_id']:
            rule['policy'] = _policy_get(request, rule['firewall_policy_id'],
                                         expand_rule=False)
        else:
            rule['policy'] = None
    return Rule(rule)


def rule_delete(request, rule_id):
    """Delete the given firewall rule."""
    neutronclient(request).delete_firewall_rule(rule_id)


def rule_update(request, rule_id, **kwargs):
    """Update attributes of an existing firewall rule."""
    body = {'firewall_rule': kwargs}
    rule = neutronclient(request).update_firewall_rule(
        rule_id, body).get('firewall_rule')
    return Rule(rule)
def policy_create(request, **kwargs):
    """Create a firewall policy

    :param request: request context
    :param name: name for policy
    :param description: description for policy
    :param firewall_rules: ordered list of rules in policy
    :param shared: boolean (default false)
    :param audited: boolean (default false)
    :return: Policy object
    """
    body = {'firewall_policy': kwargs}
    policy = neutronclient(request).create_firewall_policy(
        body).get('firewall_policy')
    return Policy(policy)


def policy_list(request, **kwargs):
    """List firewall policies with their rules expanded."""
    return _policy_list(request, expand_rule=True, **kwargs)


def policy_list_for_tenant(request, tenant_id, **kwargs):
    """Return a policy list available for the tenant.

    The list contains policies owned by the tenant and shared policies.
    This is required because Neutron returns all resources including
    all tenants if a user has admin role.
    """
    policies = policy_list(request, tenant_id=tenant_id,
                           shared=False, **kwargs)
    shared_policies = policy_list(request, shared=True, **kwargs)
    return policies + shared_policies


def _policy_list(request, expand_rule, **kwargs):
    """List policies; when expand_rule, attach each policy's Rule objects."""
    policies = neutronclient(request).list_firewall_policies(
        **kwargs).get('firewall_policies')
    if expand_rule and policies:
        rules = _rule_list(request, expand_policy=False)
        # OrderedDict replaces django.utils.datastructures.SortedDict,
        # which was deprecated in Django 1.7 and removed in Django 1.9.
        rule_dict = OrderedDict((rule.id, rule) for rule in rules)
        for p in policies:
            p['rules'] = [rule_dict.get(rule) for rule in p['firewall_rules']]
    return [Policy(p) for p in policies]


def policy_get(request, policy_id):
    """Get a single firewall policy with its rules expanded."""
    return _policy_get(request, policy_id, expand_rule=True)


def _policy_get(request, policy_id, expand_rule):
    policy = neutronclient(request).show_firewall_policy(
        policy_id).get('firewall_policy')
    if expand_rule:
        policy_rules = policy['firewall_rules']
        if policy_rules:
            rules = _rule_list(request, expand_policy=False,
                               firewall_policy_id=policy_id)
            rule_dict = OrderedDict((rule.id, rule) for rule in rules)
            # Preserve the policy's own rule ordering.
            policy['rules'] = [rule_dict.get(rule) for rule in policy_rules]
        else:
            policy['rules'] = []
    return Policy(policy)


def policy_delete(request, policy_id):
    """Delete the given firewall policy."""
    neutronclient(request).delete_firewall_policy(policy_id)


def policy_update(request, policy_id, **kwargs):
    """Update attributes of an existing firewall policy."""
    body = {'firewall_policy': kwargs}
    policy = neutronclient(request).update_firewall_policy(
        policy_id, body).get('firewall_policy')
    return Policy(policy)


def policy_insert_rule(request, policy_id, **kwargs):
    """Insert a rule into a policy; kwargs are passed through to Neutron."""
    policy = neutronclient(request).firewall_policy_insert_rule(
        policy_id, kwargs)
    return Policy(policy)


def policy_remove_rule(request, policy_id, **kwargs):
    """Remove a rule from a policy; kwargs are passed through to Neutron."""
    policy = neutronclient(request).firewall_policy_remove_rule(
        policy_id, kwargs)
    return Policy(policy)
def firewall_create(request, **kwargs):
    """Create a firewall for specified policy

    :param request: request context
    :param name: name for firewall
    :param description: description for firewall
    :param firewall_policy_id: policy id used by firewall
    :param shared: boolean (default false)
    :param admin_state_up: boolean (default true)
    :return: Firewall object
    """
    body = {'firewall': kwargs}
    firewall = neutronclient(request).create_firewall(body).get('firewall')
    return Firewall(firewall)


def firewall_list(request, **kwargs):
    """List firewalls with each firewall's policy expanded."""
    return _firewall_list(request, expand_policy=True, **kwargs)


def firewall_list_for_tenant(request, tenant_id, **kwargs):
    """Return a firewall list available for the tenant.

    The list contains firewalls owned by the tenant and shared firewalls.
    This is required because Neutron returns all resources including
    all tenants if a user has admin role.
    """
    # NOTE(amotoki): At now 'shared' attribute is not visible in Neutron
    # and there is no way to query shared firewalls explicitly.
    # Thus this method returns the same as when tenant_id is specified,
    # but I would like to have this method for symmetry to firewall
    # rules and policies to avoid unnecessary confusion.
    return firewall_list(request, tenant_id=tenant_id, **kwargs)


def _firewall_list(request, expand_policy, **kwargs):
    """List firewalls; when expand_policy, attach each one's Policy object."""
    firewalls = neutronclient(request).list_firewalls(
        **kwargs).get('firewalls')
    if expand_policy and firewalls:
        policies = _policy_list(request, expand_rule=False)
        # OrderedDict replaces django.utils.datastructures.SortedDict,
        # which was deprecated in Django 1.7 and removed in Django 1.9.
        policy_dict = OrderedDict((p.id, p) for p in policies)
        for fw in firewalls:
            fw['policy'] = policy_dict.get(fw['firewall_policy_id'])
    return [Firewall(f) for f in firewalls]


def firewall_get(request, firewall_id):
    """Get a single firewall with its policy expanded."""
    return _firewall_get(request, firewall_id, expand_policy=True)


def _firewall_get(request, firewall_id, expand_policy):
    firewall = neutronclient(request).show_firewall(
        firewall_id).get('firewall')
    if expand_policy:
        # A firewall may not reference a policy yet.
        policy_id = firewall['firewall_policy_id']
        if policy_id:
            firewall['policy'] = _policy_get(request, policy_id,
                                             expand_rule=False)
        else:
            firewall['policy'] = None
    return Firewall(firewall)


def firewall_delete(request, firewall_id):
    """Delete the given firewall."""
    neutronclient(request).delete_firewall(firewall_id)


def firewall_update(request, firewall_id, **kwargs):
    """Update attributes of an existing firewall."""
    body = {'firewall': kwargs}
    firewall = neutronclient(request).update_firewall(
        firewall_id, body).get('firewall')
    return Firewall(firewall)
@memoized.memoized
def firewall_unassociated_routers_list(request, tenant_id):
    """Return the tenant's routers that no firewall is inserted on,
    sorted by name (falling back to id)."""
    routers = neutron.router_list(request, tenant_id=tenant_id)
    firewalls = firewall_list_for_tenant(request, tenant_id=tenant_id)
    # router_ids may be absent when router insertion is not supported;
    # assumed to list the routers a firewall is attached to.
    used_router_ids = set()
    for fw in firewalls:
        used_router_ids.update(getattr(fw, 'router_ids', []))
    candidates = [r for r in routers if r.id not in used_router_ids]
    return sorted(candidates, key=lambda router: router.name_or_id)
| apache-2.0 |
kleientertainment/ds_mod_tools | pkg/win32/Python27/Lib/pprint.py | 2 | 12361 | # Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# Public API re-exported via ``from pprint import *``.
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
           "PrettyPrinter"]
# cache these for faster access: module-level names are resolved faster than
# builtins inside the hot formatting loops below.
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    PrettyPrinter(stream=stream, indent=indent, width=width,
                  depth=depth).pprint(object)
def pformat(object, indent=1, width=80, depth=None):
    """Format a Python object into a pretty-printed representation."""
    printer = PrettyPrinter(indent=indent, width=width, depth=depth)
    return printer.pformat(object)
def saferepr(object):
    """Version of repr() which can handle recursive data structures."""
    repr_string, _readable, _recursive = _safe_repr(object, {}, None, 0)
    return repr_string


def isreadable(object):
    """Determine if saferepr(object) is readable by eval()."""
    _repr, readable, _recursive = _safe_repr(object, {}, None, 0)
    return readable


def isrecursive(object):
    """Determine if object requires a recursive representation."""
    _repr, _readable, recursive = _safe_repr(object, {}, None, 0)
    return recursive
def _sorted(iterable):
    """Return sorted(iterable), silencing the ``python -3`` warning about
    comparing unequal types so output stays deterministic (Python 2 only)."""
    with warnings.catch_warnings():
        if _sys.py3kwarning:
            warnings.filterwarnings("ignore", "comparing unequal types "
                                    "not supported", DeprecationWarning)
        return sorted(iterable)
class PrettyPrinter:
    """Pretty-print Python data structures using configured indent, width,
    depth and output stream."""
    def __init__(self, indent=1, width=80, depth=None, stream=None):
        """Handle pretty printing operations onto a stream using a set of
        configured parameters.

        indent
            Number of spaces to indent for each level of nesting.
        width
            Attempted maximum number of columns in the output.
        depth
            The maximum depth to print out nested structures.
        stream
            The desired output stream.  If omitted (or false), the standard
            output stream available at construction will be used.
        """
        indent = int(indent)
        width = int(width)
        assert indent >= 0, "indent must be >= 0"
        assert depth is None or depth > 0, "depth must be > 0"
        assert width, "width must be != 0"
        self._depth = depth
        self._indent_per_level = indent
        self._width = width
        # Bind the output stream now; default to the stdout captured at
        # construction time.
        if stream is not None:
            self._stream = stream
        else:
            self._stream = _sys.stdout
    def pprint(self, object):
        # Write the formatted object to the configured stream, newline-terminated.
        self._format(object, self._stream, 0, 0, {}, 0)
        self._stream.write("\n")
    def pformat(self, object):
        # Format into an in-memory buffer and return the text.
        sio = _StringIO()
        self._format(object, sio, 0, 0, {}, 0)
        return sio.getvalue()
    def isrecursive(self, object):
        # Third element of the (repr, readable, recursive) triple.
        return self.format(object, {}, 0, 0)[2]
    def isreadable(self, object):
        s, readable, recursive = self.format(object, {}, 0, 0)
        return readable and not recursive
    def _format(self, object, stream, indent, allowance, context, level):
        # context maps id(container) -> 1 for every container currently on
        # the recursion path; seeing an id already present means a cycle.
        level = level + 1
        objid = _id(object)
        if objid in context:
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = _type(object)
        # Break onto multiple lines only when the one-line repr would not fit
        # in the remaining width; allowance reserves room for the closing
        # delimiters of enclosing containers.
        sepLines = _len(rep) > (self._width - 1 - indent - allowance)
        write = stream.write
        if self._depth and level > self._depth:
            write(rep)
            return
        # Use the container layouts below only for exact builtin reprs;
        # subclasses overriding __repr__ fall through to `rep` at the end.
        r = getattr(typ, "__repr__", None)
        if issubclass(typ, dict) and r is dict.__repr__:
            write('{')
            if self._indent_per_level > 1:
                write((self._indent_per_level - 1) * ' ')
            length = _len(object)
            if length:
                context[objid] = 1
                indent = indent + self._indent_per_level
                # Sort items for deterministic output.
                items = _sorted(object.items())
                key, ent = items[0]
                rep = self._repr(key, context, level)
                write(rep)
                write(': ')
                self._format(ent, stream, indent + _len(rep) + 2,
                             allowance + 1, context, level)
                if length > 1:
                    for key, ent in items[1:]:
                        rep = self._repr(key, context, level)
                        if sepLines:
                            write(',\n%s%s: ' % (' '*indent, rep))
                        else:
                            write(', %s: ' % rep)
                        self._format(ent, stream, indent + _len(rep) + 2,
                                     allowance + 1, context, level)
                indent = indent - self._indent_per_level
                del context[objid]
            write('}')
            return
        if ((issubclass(typ, list) and r is list.__repr__) or
            (issubclass(typ, tuple) and r is tuple.__repr__) or
            (issubclass(typ, set) and r is set.__repr__) or
            (issubclass(typ, frozenset) and r is frozenset.__repr__)
           ):
            length = _len(object)
            if issubclass(typ, list):
                write('[')
                endchar = ']'
            elif issubclass(typ, set):
                if not length:
                    write('set()')
                    return
                write('set([')
                endchar = '])'
                # Sets print sorted; extra indent covers the 'set(' prefix.
                object = _sorted(object)
                indent += 4
            elif issubclass(typ, frozenset):
                if not length:
                    write('frozenset()')
                    return
                write('frozenset([')
                endchar = '])'
                object = _sorted(object)
                indent += 10
            else:
                write('(')
                endchar = ')'
            if self._indent_per_level > 1 and sepLines:
                write((self._indent_per_level - 1) * ' ')
            if length:
                context[objid] = 1
                indent = indent + self._indent_per_level
                self._format(object[0], stream, indent, allowance + 1,
                             context, level)
                if length > 1:
                    for ent in object[1:]:
                        if sepLines:
                            write(',\n' + ' '*indent)
                        else:
                            write(', ')
                        self._format(ent, stream, indent,
                                     allowance + 1, context, level)
                indent = indent - self._indent_per_level
                del context[objid]
            # Singleton tuples need the trailing comma to round-trip.
            if issubclass(typ, tuple) and length == 1:
                write(',')
            write(endchar)
            return
        write(rep)
    def _repr(self, object, context, level):
        # Delegate to format() on a copy of context and fold the readability
        # and recursion flags into the printer state.
        repr, readable, recursive = self.format(object, context.copy(),
                                                self._depth, level)
        if not readable:
            self._readable = False
        if recursive:
            self._recursive = True
        return repr
    def format(self, object, context, maxlevels, level):
        """Format object for a specific context, returning a string
        and flags indicating whether the representation is 'readable'
        and whether the object represents a recursive construct.
        """
        return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
    """Recursion-safe repr(); returns (repr_string, isreadable, isrecursive)."""
    typ = _type(object)
    if typ is str:
        # Custom quoting is only needed once the locale module is loaded,
        # because str.isalpha may then be locale-dependent.
        if 'locale' not in _sys.modules:
            return repr(object), True, False
        # Prefer double quotes when the string contains only single quotes.
        if "'" in object and '"' not in object:
            closure = '"'
            quotes = {'"': '\\"'}
        else:
            closure = "'"
            quotes = {"'": "\\'"}
        qget = quotes.get
        sio = _StringIO()
        write = sio.write
        for char in object:
            if char.isalpha():
                write(char)
            else:
                # Escape via repr(), stripping repr's surrounding quotes.
                write(qget(char, repr(char)[1:-1]))
        return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
    r = getattr(typ, "__repr__", None)
    if issubclass(typ, dict) and r is dict.__repr__:
        if not object:
            return "{}", True, False
        objid = _id(object)
        if maxlevels and level >= maxlevels:
            # Depth limit hit: elide contents; recursive only if this dict is
            # already being formatted further up the stack.
            return "{...}", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        saferepr = _safe_repr
        for k, v in _sorted(object.items()):
            krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
            vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
            append("%s: %s" % (krepr, vrepr))
            readable = readable and kreadable and vreadable
            if krecur or vrecur:
                recursive = True
        del context[objid]
        return "{%s}" % _commajoin(components), readable, recursive
    if (issubclass(typ, list) and r is list.__repr__) or \
       (issubclass(typ, tuple) and r is tuple.__repr__):
        if issubclass(typ, list):
            if not object:
                return "[]", True, False
            format = "[%s]"
        elif _len(object) == 1:
            # Singleton tuples need the trailing comma to round-trip.
            format = "(%s,)"
        else:
            if not object:
                return "()", True, False
            format = "(%s)"
        objid = _id(object)
        if maxlevels and level >= maxlevels:
            return format % "...", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        for o in object:
            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
            append(orepr)
            if not oreadable:
                readable = False
            if orecur:
                recursive = True
        del context[objid]
        return format % _commajoin(components), readable, recursive
    rep = repr(object)
    # Default object reprs start with '<' and are not eval()-able.
    return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
    """Return the placeholder text emitted for a self-referential object."""
    return "<Recursion on %s with id=%s>" % (_type(object).__name__, _id(object))
def _perfcheck(object=None):
    # Crude benchmark harness: times _safe_repr() against pformat() on a
    # large nested structure and prints the wall-clock cost of each.
    # Not part of the public API; run via "python pprint.py".
    import time
    if object is None:
        # 100k copies of a mixed container exercises every branch of the
        # formatter (str, tuple, list, dict).
        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
    p = PrettyPrinter()
    t1 = time.time()
    _safe_repr(object, {}, None, 0)
    t2 = time.time()
    p.pformat(object)
    t3 = time.time()
    # Python 2 print statements -- this module predates print().
    print "_safe_repr:", t2 - t1
    print "pformat:", t3 - t2

if __name__ == "__main__":
    _perfcheck()
| mit |
dontnod/weblate | weblate/utils/db.py | 1 | 1599 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
# Characters that must be backslash-escaped for a database regexp match.
ESCAPED = frozenset(".\\+*?[^]$(){}=!<>|:-")


def re_escape(pattern):
    """Escape for use in database regexp match.

    This is based on re.escape, but that one escapes too much.
    """
    out = []
    for char in pattern:
        if char == "\000":
            # NUL cannot be passed through literally; spell it out.
            out.append("\\000")
        elif char in ESCAPED:
            out.append("\\" + char)
        else:
            out.append(char)
    return "".join(out)
def table_has_row(connection, table, rowname):
    """Check whether actual table has row."""
    with connection.cursor() as cursor:
        description = connection.introspection.get_table_description(
            cursor, table
        )
        # Each entry of the description exposes a .name attribute.
        return any(column.name == rowname for column in description)
| gpl-3.0 |
Vishluck/sympy | sympy/printing/tableform.py | 75 | 11818 | from __future__ import print_function, division
from sympy.core.containers import Tuple
from sympy.core.compatibility import range
from types import FunctionType
class TableForm(object):
    """
    Create a nice table representation of data.
    Examples
    ========
    >>> from sympy import TableForm
    >>> t = TableForm([[5, 7], [4, 2], [10, 3]])
    >>> print(t)
    5 7
    4 2
    10 3
    You can use the SymPy's printing system to produce tables in any
    format (ascii, latex, html, ...).
    >>> print(t.as_latex())
    \\begin{tabular}{l l}
    $5$ & $7$ \\\\
    $4$ & $2$ \\\\
    $10$ & $3$ \\\\
    \\end{tabular}
    """
    def __init__(self, data, **kwarg):
        """
        Creates a TableForm.
        Parameters:
        data ...
                2D data to be put into the table; data can be
                given as a Matrix
        headings ...
                gives the labels for rows and columns:
                Can be a single argument that applies to both
                dimensions:
                - None ... no labels
                - "automatic" ... labels are 1, 2, 3, ...
                Can be a list of labels for rows and columns:
                The labels for each dimension can be given
                as None, "automatic", or [l1, l2, ...] e.g.
                ["automatic", None] will number the rows
                [default: None]
        alignments ...
                alignment of the columns with:
                - "left" or "<"
                - "center" or "^"
                - "right" or ">"
                When given as a single value, the value is used for
                all columns. The row headings (if given) will be
                right justified unless an explicit alignment is
                given for it and all other columns.
                [default: "left"]
        formats ...
                a list of format strings or functions that accept
                3 arguments (entry, row number, col number) and
                return a string for the table entry. (If a function
                returns None then the _print method will be used.)
        wipe_zeros ...
                Don't show zeros in the table.
                [default: True]
        pad ...
                the string to use to indicate a missing value (e.g.
                elements that are None or those that are missing
                from the end of a row (i.e. any row that is shorter
                than the rest is assumed to have missing values).
                When None, nothing will be shown for values that
                are missing from the end of a row; values that are
                None, however, will be shown.
                [default: None]
        Examples
        ========
        >>> from sympy import TableForm, Matrix
        >>> TableForm([[5, 7], [4, 2], [10, 3]])
        5 7
        4 2
        10 3
        >>> TableForm([list('.'*i) for i in range(1, 4)], headings='automatic')
          | 1 2 3
        ---------
        1 | .
        2 | . .
        3 | . . .
        >>> TableForm([['.'*(j if not i%2 else 1) for i in range(3)]
        ...     for j in range(4)], alignments='rcl')
        .
          . .
         .. . ..
        ... . ...
        """
        from sympy import Symbol, S, Matrix
        from sympy.core.sympify import SympifyError
        # We only support 2D data. Check the consistency:
        if isinstance(data, Matrix):
            data = data.tolist()
        _w = len(data[0])
        _h = len(data)
        # fill out any short lines
        pad = kwarg.get('pad', None)
        ok_None = False
        if pad is None:
            # No explicit pad: a bare space Symbol stands in for trailing
            # missing values, and None entries are kept visible as-is.
            pad = " "
            ok_None = True
        pad = Symbol(pad)
        # _w is recomputed as the widest row so ragged input can be padded.
        _w = max(len(line) for line in data)
        for i, line in enumerate(data):
            if len(line) != _w:
                line.extend([pad]*(_w - len(line)))
            for j, lj in enumerate(line):
                if lj is None:
                    if not ok_None:
                        lj = pad
                else:
                    # Sympify each entry; fall back to a plain Symbol for
                    # values sympify cannot handle (e.g. arbitrary strings).
                    try:
                        lj = S(lj)
                    except SympifyError:
                        lj = Symbol(str(lj))
                line[j] = lj
            data[i] = line
        _lines = Tuple(*data)
        headings = kwarg.get("headings", [None, None])
        if headings == "automatic":
            _headings = [range(1, _h + 1), range(1, _w + 1)]
        else:
            h1, h2 = headings
            if h1 == "automatic":
                h1 = range(1, _h + 1)
            if h2 == "automatic":
                h2 = range(1, _w + 1)
            _headings = [h1, h2]
        allow = ('l', 'r', 'c')
        alignments = kwarg.get("alignments", "l")

        def _std_align(a):
            # Normalize "left"/"<" style spellings to the canonical
            # single-letter codes 'l', 'r', 'c'.
            a = a.strip().lower()
            if len(a) > 1:
                return {'left': 'l', 'right': 'r', 'center': 'c'}.get(a, a)
            else:
                return {'<': 'l', '>': 'r', '^': 'c'}.get(a, a)
        std_align = _std_align(alignments)
        if std_align in allow:
            # A single alignment applies to every column.
            _alignments = [std_align]*_w
        else:
            _alignments = []
            for a in alignments:
                std_align = _std_align(a)
                _alignments.append(std_align)
                if std_align not in ('l', 'r', 'c'):
                    raise ValueError('alignment "%s" unrecognized' %
                        alignments)
        # One extra alignment means the first one belongs to the row-heading
        # column; otherwise headings default to right-justified.
        if _headings[0] and len(_alignments) == _w + 1:
            _head_align = _alignments[0]
            _alignments = _alignments[1:]
        else:
            _head_align = 'r'
        if len(_alignments) != _w:
            raise ValueError(
                'wrong number of alignments: expected %s but got %s' %
                (_w, len(_alignments)))
        _column_formats = kwarg.get("formats", [None]*_w)
        _wipe_zeros = kwarg.get("wipe_zeros", True)
        self._w = _w
        self._h = _h
        self._lines = _lines
        self._headings = _headings
        self._head_align = _head_align
        self._alignments = _alignments
        self._column_formats = _column_formats
        self._wipe_zeros = _wipe_zeros

    def __repr__(self):
        from .str import sstr
        return sstr(self, order=None)

    def __str__(self):
        from .str import sstr
        return sstr(self, order=None)

    def as_matrix(self):
        """Returns the data of the table in Matrix form.
        Examples
        ========
        >>> from sympy import TableForm
        >>> t = TableForm([[5, 7], [4, 2], [10, 3]], headings='automatic')
        >>> t
          | 1  2
        --------
        1 | 5  7
        2 | 4  2
        3 | 10 3
        >>> t.as_matrix()
        Matrix([
        [ 5, 7],
        [ 4, 2],
        [10, 3]])
        """
        from sympy import Matrix
        return Matrix(self._lines)

    def as_str(self):
        # XXX obsolete ?
        return str(self)

    def as_latex(self):
        from .latex import latex
        return latex(self)

    def _sympystr(self, p):
        """
        Returns the string representation of 'self'.
        Examples
        ========
        >>> from sympy import TableForm
        >>> t = TableForm([[5, 7], [4, 2], [10, 3]])
        >>> s = t.as_str()
        """
        # First pass: stringify every cell and record per-column widths.
        column_widths = [0] * self._w
        lines = []
        for line in self._lines:
            new_line = []
            for i in range(self._w):
                # Format the item somehow if needed:
                s = str(line[i])
                if self._wipe_zeros and (s == "0"):
                    s = " "
                w = len(s)
                if w > column_widths[i]:
                    column_widths[i] = w
                new_line.append(s)
            lines.append(new_line)
        # Check heading:
        # NOTE: heading entries are converted to str *in place* on
        # self._headings here (str(str(x)) == str(x), so reprinting is safe).
        if self._headings[0]:
            self._headings[0] = [str(x) for x in self._headings[0]]
            _head_width = max([len(x) for x in self._headings[0]])
        if self._headings[1]:
            new_line = []
            for i in range(self._w):
                # Format the item somehow if needed:
                s = str(self._headings[1][i])
                w = len(s)
                if w > column_widths[i]:
                    column_widths[i] = w
                new_line.append(s)
            self._headings[1] = new_line
        format_str = []

        def _align(align, w):
            # Build a %-style field spec: '-' prefix left-justifies.
            return '%%%s%ss' % (
                ("-" if align == "l" else ""),
                str(w))
        format_str = [_align(align, w) for align, w in
                      zip(self._alignments, column_widths)]
        if self._headings[0]:
            format_str.insert(0, _align(self._head_align, _head_width))
            format_str.insert(1, '|')
        format_str = ' '.join(format_str) + '\n'
        s = []
        if self._headings[1]:
            d = self._headings[1]
            if self._headings[0]:
                d = [""] + d
            first_line = format_str % tuple(d)
            s.append(first_line)
            s.append("-" * (len(first_line) - 1) + "\n")
        for i, line in enumerate(lines):
            # Centered columns are padded here since %-formatting only
            # supports left/right justification.
            d = [l if self._alignments[j] != 'c' else
                 l.center(column_widths[j]) for j, l in enumerate(line)]
            if self._headings[0]:
                l = self._headings[0][i]
                l = (l if self._head_align != 'c' else
                     l.center(_head_width))
                d = [l] + d
            s.append(format_str % tuple(d))
        return ''.join(s)[:-1]  # don't include trailing newline

    def _latex(self, printer):
        """
        Returns the string representation of 'self'.
        """
        # Check heading:
        # NOTE: as in _sympystr, headings are stringified in place.
        if self._headings[1]:
            new_line = []
            for i in range(self._w):
                # Format the item somehow if needed:
                new_line.append(str(self._headings[1][i]))
            self._headings[1] = new_line
        alignments = []
        if self._headings[0]:
            self._headings[0] = [str(x) for x in self._headings[0]]
            alignments = [self._head_align]
        alignments.extend(self._alignments)
        s = r"\begin{tabular}{" + " ".join(alignments) + "}\n"
        if self._headings[1]:
            d = self._headings[1]
            if self._headings[0]:
                d = [""] + d
            first_line = " & ".join(d) + r" \\" + "\n"
            s += first_line
            s += r"\hline" + "\n"
        for i, line in enumerate(self._lines):
            d = []
            for j, x in enumerate(line):
                if self._wipe_zeros and (x in (0, "0")):
                    d.append(" ")
                    continue
                f = self._column_formats[j]
                if f:
                    if isinstance(f, FunctionType):
                        # A formatting function may decline by returning
                        # None, in which case the printer is used.
                        v = f(x, i, j)
                        if v is None:
                            v = printer._print(x)
                    else:
                        v = f % x
                    d.append(v)
                else:
                    # Default: math-mode wrap the printed entry.
                    v = printer._print(x)
                    d.append("$%s$" % v)
            if self._headings[0]:
                d = [self._headings[0][i]] + d
            s += " & ".join(d) + r" \\" + "\n"
        s += r"\end{tabular}"
        return s
| bsd-3-clause |
nikolas/lettuce | tests/integration/lib/Django-1.2.5/django/conf/locale/fr/formats.py | 80 | 1252 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# French (fr) locale conventions for displaying dates, times and numbers.
# Display formats below use Django's date-template syntax.
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# Input formats use Python strftime syntax and are tried in order when
# parsing user-supplied values.
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| gpl-3.0 |
Vagab0nd/SiCKRAGE | lib3/sqlalchemy/dialects/postgresql/psycopg2.py | 3 | 35979 | # postgresql/psycopg2.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`_sa.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but
are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the :paramref:`.Connection.execution_options.stream_results`
execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect.
.. seealso::
:ref:`psycopg2_isolation_level`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
* ``executemany_mode``, ``executemany_batch_page_size``,
``executemany_values_page_size``: Allows use of psycopg2
 extensions for optimizing "executemany"-style queries. See the referenced
section below for details.
.. seealso::
:ref:`psycopg2_executemany_mode`
* ``use_batch_mode``: this is the previous setting used to affect "executemany"
mode and is now deprecated.
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
.. seealso::
`PQconnectdbParams \
<http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------
The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections. This behavior can be
further tailored using a particular set of environment variables which are
prefixed with ``PG_...``, which are consumed by ``libpq`` to take the place of
any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
.. seealso::
`Environment Variables\
<https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
PostgreSQL documentation on how to use ``PG_...``
environment variables for connections.
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`_engine.Connection.execution_options`,
:meth:`.Executable.execution_options`,
:meth:`_query.Query.execution_options`,
in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan
of a :class:`_engine.Connection` (can only be set on a connection,
not a statement
or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side
cursors - this feature makes use of "named" cursors in combination with
special result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`_engine.Engine` is used.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionadded:: 1.0.6
.. _psycopg2_batch_mode:
.. _psycopg2_executemany_mode:
Psycopg2 Fast Execution Helpers
-------------------------------
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<http://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by multiple orders of magnitude.
SQLAlchemy allows this extension to be used for all ``executemany()`` style
calls invoked by an :class:`_engine.Engine`
when used with :ref:`multiple parameter
sets <execute_multiple>`, which includes the use of this feature both by the
Core as well as by the ORM for inserts of objects with non-autogenerated
primary key values, by adding the ``executemany_mode`` flag to
:func:`_sa.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='batch')
.. versionchanged:: 1.3.7 - the ``use_batch_mode`` flag has been superseded
by a new parameter ``executemany_mode`` which provides support both for
psycopg2's ``execute_batch`` helper as well as the ``execute_values``
helper.
Possible options for ``executemany_mode`` include:
* ``None`` - By default, psycopg2's extensions are not used, and the usual
``cursor.executemany()`` method is used when invoking batches of statements.
* ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` so that multiple copies
of a SQL query, each one corresponding to a parameter set passed to
``executemany()``, are joined into a single SQL string separated by a
semicolon. This is the same behavior as was provided by the
``use_batch_mode=True`` flag.
* ``'values'``- For Core :func:`_expression.insert`
constructs only (including those
emitted by the ORM automatically), the ``psycopg2.extras.execute_values``
extension is used so that multiple parameter sets are grouped into a single
INSERT statement and joined together with multiple VALUES expressions. This
method requires that the string text of the VALUES clause inside the
INSERT statement is manipulated, so is only supported with a compiled
:func:`_expression.insert` construct where the format is predictable.
For all other
constructs, including plain textual INSERT statements not rendered by the
SQLAlchemy expression language compiler, the
``psycopg2.extras.execute_batch`` method is used. It is therefore important
to note that **"values" mode implies that "batch" mode is also used for
all statements for which "values" mode does not apply**.
For both strategies, the ``executemany_batch_page_size`` and
``executemany_values_page_size`` arguments control how many parameter sets
should be represented in each execution. Because "values" mode implies a
fallback down to "batch" mode for non-INSERT statements, there are two
independent page size arguments. For each, the default value of ``None`` means
to use psycopg2's defaults, which at the time of this writing are quite low at
100. For the ``execute_values`` method, a number as high as 10000 may prove
to be performant, whereas for ``execute_batch``, as the number represents
full statements repeated, a number closer to the default of 100 is likely
more appropriate::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values',
executemany_values_page_size=10000, executemany_batch_page_size=500)
.. seealso::
:ref:`execute_multiple` - General information on using the
:class:`_engine.Connection`
object to execute statements in such a way as to make
use of the DBAPI ``.executemany()`` method.
.. versionchanged:: 1.3.7 - Added support for
``psycopg2.extras.execute_values``. The ``use_batch_mode`` flag is
superseded by the ``executemany_mode`` flag.
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf8``, as a more useful default::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's
:meth:`psycopg2:connection.set_client_encoding` method
on all new connections based on the value passed to
:func:`_sa.create_engine` using the ``client_encoding`` parameter::
# set_client_encoding() setting;
# works for *all* PostgreSQL versions
engine = create_engine("postgresql://user:pass@host/dbname",
client_encoding='utf8')
This overrides the encoding specified in the PostgreSQL client configuration.
When using the parameter in this way, the psycopg2 driver emits
``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
in all PostgreSQL versions.
Note that the ``client_encoding`` setting as passed to
:func:`_sa.create_engine`
is **not the same** as the more recently added ``client_encoding`` parameter
now supported by libpq directly. This is enabled when ``client_encoding``
is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
using the :paramref:`_sa.create_engine.connect_args` parameter::
engine = create_engine(
"postgresql://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'})
# using the query string is equivalent
engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
The above parameter was only added to libpq as of version 9.1 of PostgreSQL,
so using the previous method is better for cross-version support.
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`_sa.create_engine` will disable usage of ``psycopg2.extensions.
UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`_sa.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
Bound Parameter Styles
----------------------
The default parameter style for the psycopg2 dialect is "pyformat", where
SQL is rendered using ``%(paramname)s`` style. This format has the limitation
that it does not accommodate the unusual case of parameter names that
actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
generates bound parameter names based on the name of a column, the presence
of these characters in a column name can lead to problems.
There are two solutions to the issue of a :class:`_schema.Column`
that contains
one of these characters in its name. One is to specify the
:paramref:`.schema.Column.key` for columns that have such names::
measurement = Table('measurement', metadata,
Column('Size (meters)', Integer, key='size_meters')
)
Above, an INSERT statement such as ``measurement.insert()`` will use
``size_meters`` as the parameter name, and a SQL expression such as
``measurement.c.size_meters > 10`` will derive the bound parameter name
from the ``size_meters`` key as well.
.. versionchanged:: 1.0.0 - SQL expressions will use
:attr:`_schema.Column.key`
as the source of naming when anonymous bound parameters are created
in SQL expressions; previously, this behavior only applied to
:meth:`_schema.Table.insert` and :meth:`_schema.Table.update`
parameter names.
The other solution is to use a positional format; psycopg2 allows use of the
"format" paramstyle, which can be passed to
:paramref:`_sa.create_engine.paramstyle`::
engine = create_engine(
'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
With the above engine, instead of a statement like::
INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
{'Size (meters)': 1}
we instead see::
INSERT INTO measurement ("Size (meters)") VALUES (%s)
(1, )
Where above, the dictionary style is converted into a tuple with positional
style.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`_sa.create_engine`
,
as well as the ``isolation_level`` argument used by
:meth:`_engine.Connection.execution_options`. When using the psycopg2 dialect
, these
options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a PostgreSQL directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger. When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. seealso::
`Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
""" # noqa
from __future__ import absolute_import
import decimal
import logging
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import collections_abc
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
    """Numeric type whose result conversion depends on the wire-level
    PostgreSQL type OID of the returned column."""

    def bind_processor(self, dialect):
        # psycopg2 adapts Decimal/float parameters natively.
        return None

    def result_processor(self, dialect, coltype):
        if coltype in _FLOAT_TYPES:
            if self.asdecimal:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale
                )
            # pg8000 returns float natively for 701
            return None
        elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
            if self.asdecimal:
                # pg8000 returns Decimal natively for 1700
                return None
            return processors.to_float
        else:
            raise exc.InvalidRequestError(
                "Unknown PG numeric type: %d" % coltype
            )
class _PGEnum(ENUM):
    """ENUM subclass that forces Python-side unicode conversion of enum
    labels under Python 2, where psycopg2's converters do not apply."""

    def result_processor(self, dialect, coltype):
        if util.py2k and self._expect_unicode is True:
            # for py2k, if the enum type needs unicode data (which is set up as
            # part of the Enum() constructor based on values passed as py2k
            # unicode objects) we have to use our own converters since
            # psycopg2's don't work, a rare exception to the "modern DBAPIs
            # support unicode everywhere" theme of deprecating
            # convert_unicode=True. Use the special "force_nocheck" directive
            # which forces unicode conversion to happen on the Python side
            # without an isinstance() check. in py3k psycopg2 does the right
            # thing automatically.
            self._expect_unicode = "force_nocheck"
        return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
    """HSTORE type that steps aside when psycopg2's native hstore
    extension (register_hstore) is active on the dialect."""

    def bind_processor(self, dialect):
        if dialect._has_native_hstore:
            # psycopg2 adapts dicts to hstore itself; no Python-side work.
            return None
        return super(_PGHStore, self).bind_processor(dialect)

    def result_processor(self, dialect, coltype):
        if dialect._has_native_hstore:
            return None
        return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
    """JSON type that skips SQLAlchemy-side deserialization when psycopg2
    already returns parsed JSON values natively."""

    def result_processor(self, dialect, coltype):
        if dialect._has_native_json:
            return None
        return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
    """JSONB type that skips SQLAlchemy-side deserialization when psycopg2
    already returns parsed JSONB values natively."""

    def result_processor(self, dialect, coltype):
        if dialect._has_native_jsonb:
            return None
        return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
    """UUID type bridging string values to uuid.UUID objects when the
    dialect uses psycopg2's native UUID handling but the column was not
    declared with as_uuid=True."""

    def bind_processor(self, dialect):
        if self.as_uuid or not dialect.use_native_uuid:
            # Either the caller already passes UUID objects, or native
            # handling is off -- no conversion needed here.
            return None

        def to_uuid(value):
            return _python_UUID(value) if value is not None else None
        return to_uuid

    def result_processor(self, dialect, coltype):
        if self.as_uuid or not dialect.use_native_uuid:
            return None

        def to_str(value):
            return str(value) if value is not None else None
        return to_str
# Monotonic counter used to build unique names for server-side cursors.
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
    """Execution context adding psycopg2 server-side cursor support and
    NOTICE-message logging."""

    def create_server_side_cursor(self):
        # use server-side cursors:
        # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
        # A named cursor is what makes psycopg2 use a server-side cursor;
        # the name combines this context's id with a global counter so it
        # is unique per execution.
        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
        return self._dbapi_connection.cursor(ident)

    def get_result_proxy(self):
        # Flush any accumulated NOTICE messages before building the result.
        self._log_notices(self.cursor)
        if self._is_server_side:
            # Server-side cursors need buffered row fetching.
            return _result.BufferedRowResultProxy(self)
        else:
            return _result.ResultProxy(self)

    def _log_notices(self, cursor):
        """Emit the connection's queued NOTICE messages via the module
        logger, then clear the queue in place."""
        # check also that notices is an iterable, after it's already
        # established that we will be iterating through it. This is to get
        # around test suites such as SQLAlchemy's using a Mock object for
        # cursor
        if not cursor.connection.notices or not isinstance(
            cursor.connection.notices, collections_abc.Iterable
        ):
            return
        for notice in cursor.connection.notices:
            # NOTICE messages have a
            # newline character at the end
            logger.info(notice.rstrip())
        # Slice-assign so the connection's own list object is emptied.
        cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
    """No psycopg2-specific SQL compilation behavior is required."""
    pass
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
    """No psycopg2-specific identifier quoting behavior is required."""
    pass
# Symbols naming the three supported executemany() strategies; see
# PGDialect_psycopg2.__init__ (executemany_mode) and do_executemany().
EXECUTEMANY_DEFAULT = util.symbol("executemany_default")
EXECUTEMANY_BATCH = util.symbol("executemany_batch")
EXECUTEMANY_VALUES = util.symbol("executemany_values")
class PGDialect_psycopg2(PGDialect):
    """PostgreSQL dialect for the psycopg2 DBAPI."""

    driver = "psycopg2"
    # Under Python 2 psycopg2 requires encoded (bytes) statements.
    if util.py2k:
        supports_unicode_statements = False
    supports_server_side_cursors = True
    default_paramstyle = "pyformat"
    # set to true based on psycopg2 version
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_psycopg2
    statement_compiler = PGCompiler_psycopg2
    preparer = PGIdentifierPreparer_psycopg2
    # Detected from the DBAPI in __init__; (0, 0) means "unknown".
    psycopg2_version = (0, 0)
    # Minimum psycopg2 version required for each optional feature.
    FEATURE_VERSION_MAP = dict(
        native_json=(2, 5),
        native_jsonb=(2, 5, 4),
        sane_multi_rowcount=(2, 0, 9),
        array_oid=(2, 4, 3),
        hstore_adapter=(2, 4),
    )
    # Populated in initialize() once a live connection is available.
    _has_native_hstore = False
    _has_native_json = False
    _has_native_jsonb = False
    engine_config_types = PGDialect.engine_config_types.union(
        [("use_native_unicode", util.asbool)]
    )
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumeric,
            ENUM: _PGEnum,  # needs force_unicode
            sqltypes.Enum: _PGEnum,  # needs force_unicode
            HSTORE: _PGHStore,
            JSON: _PGJSON,
            sqltypes.JSON: _PGJSON,
            JSONB: _PGJSONB,
            UUID: _PGUUID,
        },
    )
@util.deprecated_params(
    use_batch_mode=(
        "1.3.7",
        "The psycopg2 use_batch_mode flag is superseded by "
        "executemany_mode='batch'",
    )
)
def __init__(
    self,
    server_side_cursors=False,
    use_native_unicode=True,
    client_encoding=None,
    use_native_hstore=True,
    use_native_uuid=True,
    executemany_mode=None,
    executemany_batch_page_size=None,
    executemany_values_page_size=None,
    use_batch_mode=None,
    **kwargs
):
    """Construct a psycopg2 dialect.

    :param server_side_cursors: use named (server-side) cursors.
    :param use_native_unicode: register psycopg2's UNICODE extensions.
    :param client_encoding: if set, applied to each new connection.
    :param use_native_hstore: use psycopg2's hstore adapter if present.
    :param use_native_uuid: use psycopg2's uuid adapter.
    :param executemany_mode: None, "batch", or "values".
    :param executemany_batch_page_size: page_size for execute_batch.
    :param executemany_values_page_size: page_size for execute_values.
    :param use_batch_mode: deprecated; same as executemany_mode="batch".
    """
    PGDialect.__init__(self, **kwargs)
    self.server_side_cursors = server_side_cursors
    self.use_native_unicode = use_native_unicode
    self.use_native_hstore = use_native_hstore
    self.use_native_uuid = use_native_uuid
    self.supports_unicode_binds = use_native_unicode
    self.client_encoding = client_encoding
    # Parse executemany_mode argument, allowing it to be only one of the
    # symbol names
    self.executemany_mode = util.symbol.parse_user_argument(
        executemany_mode,
        {
            EXECUTEMANY_DEFAULT: [None],
            EXECUTEMANY_BATCH: ["batch"],
            EXECUTEMANY_VALUES: ["values"],
        },
        "executemany_mode",
    )
    # The deprecated flag overrides the parsed mode.
    if use_batch_mode:
        self.executemany_mode = EXECUTEMANY_BATCH
    self.executemany_batch_page_size = executemany_batch_page_size
    self.executemany_values_page_size = executemany_values_page_size
    # Extract "x.y[.z]" from the DBAPI version string, ignoring any
    # trailing build qualifiers.
    if self.dbapi and hasattr(self.dbapi, "__version__"):
        m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
        if m:
            self.psycopg2_version = tuple(
                int(x) for x in m.group(1, 2, 3) if x is not None
            )
def initialize(self, connection):
    """Probe the live connection and DBAPI version for native features."""
    super(PGDialect_psycopg2, self).initialize(connection)
    # hstore requires both the flag and a successful OID lookup.
    self._has_native_hstore = (
        self.use_native_hstore
        and self._hstore_oids(connection.connection) is not None
    )
    self._has_native_json = (
        self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"]
    )
    self._has_native_jsonb = (
        self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"]
    )
    # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
    # Accurate multi-row rowcounts only apply in the default
    # executemany mode (batch/values modes report differently).
    self.supports_sane_multi_rowcount = (
        self.psycopg2_version
        >= self.FEATURE_VERSION_MAP["sane_multi_rowcount"]
        and self.executemany_mode is EXECUTEMANY_DEFAULT
    )
@classmethod
def dbapi(cls):
    """Import and return the psycopg2 DBAPI module."""
    import psycopg2
    return psycopg2
@classmethod
def _psycopg2_extensions(cls):
    """Return psycopg2.extensions (indirected for test mockability)."""
    from psycopg2 import extensions
    return extensions
@classmethod
def _psycopg2_extras(cls):
    """Return psycopg2.extras (indirected for test mockability)."""
    from psycopg2 import extras
    return extras
@util.memoized_property
def _isolation_lookup(self):
    # Map SQLAlchemy isolation level names to psycopg2 constants;
    # memoized since the mapping never changes per dialect instance.
    extensions = self._psycopg2_extensions()
    return {
        "AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
        "READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
        "READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
        "REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
        "SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
    }
def set_isolation_level(self, connection, level):
    """Apply an isolation level to a raw psycopg2 connection.

    :raises exc.ArgumentError: if *level* is not a recognized name.
    """
    try:
        # Accept underscore spellings, e.g. "REPEATABLE_READ".
        level = self._isolation_lookup[level.replace("_", " ")]
    except KeyError as err:
        util.raise_(
            exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s"
                % (level, self.name, ", ".join(self._isolation_lookup))
            ),
            replace_context=err,
        )
    connection.set_isolation_level(level)
def on_connect(self):
    """Build the per-connection setup hook.

    Collects the individually-enabled setup steps (client encoding,
    isolation level, native uuid/unicode/hstore registration, JSON
    deserializers) and returns a single callable applying them all,
    or None if no steps are enabled.
    """
    extras = self._psycopg2_extras()
    extensions = self._psycopg2_extensions()
    fns = []
    if self.client_encoding is not None:
        def on_connect(conn):
            conn.set_client_encoding(self.client_encoding)
        fns.append(on_connect)
    if self.isolation_level is not None:
        def on_connect(conn):
            self.set_isolation_level(conn, self.isolation_level)
        fns.append(on_connect)
    if self.dbapi and self.use_native_uuid:
        def on_connect(conn):
            extras.register_uuid(None, conn)
        fns.append(on_connect)
    if self.dbapi and self.use_native_unicode:
        def on_connect(conn):
            extensions.register_type(extensions.UNICODE, conn)
            extensions.register_type(extensions.UNICODEARRAY, conn)
        fns.append(on_connect)
    if self.dbapi and self.use_native_hstore:
        def on_connect(conn):
            hstore_oids = self._hstore_oids(conn)
            if hstore_oids is not None:
                oid, array_oid = hstore_oids
                kw = {"oid": oid}
                if util.py2k:
                    kw["unicode"] = True
                # array_oid support requires a newer psycopg2.
                if (
                    self.psycopg2_version
                    >= self.FEATURE_VERSION_MAP["array_oid"]
                ):
                    kw["array_oid"] = array_oid
                extras.register_hstore(conn, **kw)
        fns.append(on_connect)
    if self.dbapi and self._json_deserializer:
        def on_connect(conn):
            if self._has_native_json:
                extras.register_default_json(
                    conn, loads=self._json_deserializer
                )
            if self._has_native_jsonb:
                extras.register_default_jsonb(
                    conn, loads=self._json_deserializer
                )
        fns.append(on_connect)
    if fns:
        def on_connect(conn):
            for fn in fns:
                fn(conn)
        return on_connect
    else:
        return None
def do_executemany(self, cursor, statement, parameters, context=None):
    """Run executemany() via the strategy selected by executemany_mode.

    Default mode uses plain cursor.executemany(); "values" mode uses
    psycopg2 execute_values() for single-VALUES INSERTs, falling back
    to execute_batch() otherwise.
    """
    if self.executemany_mode is EXECUTEMANY_DEFAULT:
        cursor.executemany(statement, parameters)
        return
    # "values" applies only to an INSERT with a single VALUES clause.
    if (
        self.executemany_mode is EXECUTEMANY_VALUES
        and context
        and context.isinsert
        and context.compiled.insert_single_values_expr
    ):
        executemany_values = (
            "(%s)" % context.compiled.insert_single_values_expr
        )
        # guard for statement that was altered via event hook or similar
        if executemany_values not in statement:
            executemany_values = None
    else:
        executemany_values = None
    if executemany_values:
        # Currently, SQLAlchemy does not pass "RETURNING" statements
        # into executemany(), since no DBAPI has ever supported that
        # until the introduction of psycopg2's executemany_values, so
        # we are not yet using the fetch=True flag.
        statement = statement.replace(executemany_values, "%s")
        if self.executemany_values_page_size:
            kwargs = {"page_size": self.executemany_values_page_size}
        else:
            kwargs = {}
        self._psycopg2_extras().execute_values(
            cursor,
            statement,
            parameters,
            template=executemany_values,
            **kwargs
        )
    else:
        if self.executemany_batch_page_size:
            kwargs = {"page_size": self.executemany_batch_page_size}
        else:
            kwargs = {}
        self._psycopg2_extras().execute_batch(
            cursor, statement, parameters, **kwargs
        )
@util.memoized_instancemethod
def _hstore_oids(self, conn):
    """Return (oid, array_oid) for the hstore type, or None when the
    psycopg2 version lacks the adapter or the type is absent.
    Memoized per dialect instance."""
    if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]:
        extras = self._psycopg2_extras()
        oids = extras.HstoreAdapter.get_oids(conn)
        if oids is not None and oids[0]:
            return oids[0:2]
    return None
def create_connect_args(self, url):
    """Translate a SQLAlchemy URL into (args, kwargs) for
    psycopg2.connect()."""
    opts = url.translate_connect_args(username="user")
    if opts:
        if "port" in opts:
            opts["port"] = int(opts["port"])
        opts.update(url.query)
        # send individual dbname, user, password, host, port
        # parameters to psycopg2.connect()
        return ([], opts)
    elif url.query:
        # any other connection arguments, pass directly
        opts.update(url.query)
        return ([], opts)
    else:
        # no connection arguments whatsoever; psycopg2.connect()
        # requires that "dsn" be present as a blank string.
        return ([""], opts)
def is_disconnect(self, e, connection, cursor):
    """Return True if exception *e* indicates a dropped connection."""
    if isinstance(e, self.dbapi.Error):
        # check the "closed" flag. this might not be
        # present on old psycopg2 versions. Also,
        # this flag doesn't actually help in a lot of disconnect
        # situations, so don't rely on it.
        if getattr(connection, "closed", False):
            return True
        # checks based on strings. in the case that .closed
        # didn't cut it, fall back onto these.
        str_e = str(e).partition("\n")[0]
        for msg in [
            # these error messages from libpq: interfaces/libpq/fe-misc.c
            # and interfaces/libpq/fe-secure.c.
            "terminating connection",
            "closed the connection",
            "connection not open",
            "could not receive data from server",
            "could not send data to server",
            # psycopg2 client errors, psycopg2/conenction.h,
            # psycopg2/cursor.h
            "connection already closed",
            "cursor already closed",
            # not sure where this path is originally from, it may
            # be obsolete. It really says "losed", not "closed".
            "losed the connection unexpectedly",
            # these can occur in newer SSL
            "connection has been closed unexpectedly",
            "SSL SYSCALL error: Bad file descriptor",
            "SSL SYSCALL error: EOF detected",
            "SSL error: decryption failed or bad record mac",
            "SSL SYSCALL error: Operation timed out",
        ]:
            idx = str_e.find(msg)
            # require the match outside any quoted identifier, so
            # e.g. a table named "closed the connection" doesn't
            # false-positive.
            if idx >= 0 and '"' not in str_e[:idx]:
                return True
    return False
dialect = PGDialect_psycopg2
| gpl-3.0 |
cherusk/ansible | lib/ansible/modules/cloud/azure/azure_rm_storageblob.py | 56 | 22010 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_storageblob
short_description: Manage blob containers and blob objects.
version_added: "2.1"
description:
- Create, update and delete blob containers and blob objects. Use to upload a file and store it as a blob object,
or download a blob object to a file.
options:
storage_account_name:
description:
- Name of the storage account to use.
required: true
aliases:
- account_name
blob:
description:
- Name of a blob object within the container.
required: false
default: null
aliases:
- blob_name
container:
description:
- Name of a blob container within the storage account.
required: true
aliases:
- container_name
content_type:
description:
- Set the blob content-type header. For example, 'image/png'.
default: null
required: false
cache_control:
description:
- Set the blob cache-control header.
required: false
default: null
content_disposition:
description:
- Set the blob content-disposition header.
required: false
default: null
content_encoding:
description:
- Set the blob encoding header.
required: false
default: null
content_language:
description:
- Set the blob content-language header.
required: false
default: null
content_md5:
description:
- Set the blob md5 hash value.
required: false
default: null
dest:
description:
- Destination file path. Use with state 'present' to download a blob.
aliases:
- destination
required: false
default: null
force:
description:
- Overwrite existing blob or file when uploading or downloading. Force deletion of a container
that contains blobs.
default: false
required: false
resource_group:
description:
- Name of the resource group to use.
required: true
src:
description:
- Source file path. Use with state 'present' to upload a blob.
aliases:
- source
required: false
default: null
state:
description:
- Assert the state of a container or blob.
- Use state 'absent' with a container value only to delete a container. Include a blob value to remove
a specific blob. A container will not be deleted, if it contains blobs. Use the force option to override,
deleting the container and all associated blobs.
- Use state 'present' to create or update a container and upload or download a blob. If the container
does not exist, it will be created. If it exists, it will be updated with configuration options. Provide
a blob name and either src or dest to upload or download. Provide a src path to upload and a dest path
to download. If a blob (uploading) or a file (downloading) already exists, it will not be overwritten
unless the force parameter is true.
default: present
required: false
choices:
- absent
- present
public_access:
description:
- Determine a container's level of public access. By default containers are private. Can only be set at
time of container creation.
required: false
default: null
choices:
- container
- blob
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Remove container foo
azure_rm_storageblob:
resource_group: testing
storage_account_name: clh0002
container: foo
state: absent
- name: Create container foo and upload a file
azure_rm_storageblob:
resource_group: Testing
storage_account_name: clh0002
container: foo
blob: graylog.png
src: ./files/graylog.png
public_access: container
content_type: 'application/image'
- name: Download the file
azure_rm_storageblob:
resource_group: Testing
storage_account_name: clh0002
container: foo
blob: graylog.png
dest: ~/tmp/images/graylog.png
'''
RETURN = '''
blob:
description: Facts about the current state of the blob.
returned: when a blob is operated on
type: dict
sample: {
"content_length": 136532,
"content_settings": {
"cache_control": null,
"content_disposition": null,
"content_encoding": null,
"content_language": null,
"content_md5": null,
"content_type": "application/image"
},
"last_modified": "09-Mar-2016 22:08:25 +0000",
"name": "graylog.png",
"tags": {},
"type": "BlockBlob"
}
container:
description: Facts about the current state of the selected container.
returned: always
type: dict
sample: {
"last_mdoified": "09-Mar-2016 19:28:26 +0000",
"name": "foo",
"tags": {}
}
'''
import os
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from azure.storage.blob.models import ContentSettings
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMStorageBlob(AzureRMModuleBase):
def __init__(self):
    """Declare the module argument spec and initialize state."""
    self.module_arg_spec = dict(
        storage_account_name=dict(required=True, type='str', aliases=['account_name']),
        blob=dict(type='str', aliases=['blob_name']),
        container=dict(required=True, type='str', aliases=['container_name']),
        dest=dict(type='str'),
        force=dict(type='bool', default=False),
        resource_group=dict(required=True, type='str'),
        src=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        public_access=dict(type='str', choices=['container', 'blob']),
        content_type=dict(type='str'),
        content_encoding=dict(type='str'),
        content_language=dict(type='str'),
        content_disposition=dict(type='str'),
        cache_control=dict(type='str'),
        content_md5=dict(type='str'),
    )
    # A single run either uploads (src) or downloads (dest), not both.
    mutually_exclusive = [('src', 'dest')]
    # Placeholders; populated from module params in exec_module().
    self.blob_client = None
    self.blob_details = None
    self.storage_account_name = None
    self.blob = None
    self.blob_obj = None
    self.container = None
    self.container_obj = None
    self.dest = None
    self.force = None
    self.resource_group = None
    self.src = None
    self.state = None
    self.tags = None
    self.public_access = None
    self.results = dict(
        changed=False,
        actions=[],
        container=dict(),
        blob=dict()
    )
    super(AzureRMStorageBlob, self).__init__(derived_arg_spec=self.module_arg_spec,
                                             supports_check_mode=True,
                                             mutually_exclusive=mutually_exclusive,
                                             supports_tags=True)
def exec_module(self, **kwargs):
    """Main module logic.

    Creates/updates/deletes containers and blobs, uploads or downloads
    blob content, and synchronizes tags and content settings.  Returns
    the results dict expected by AzureRMModuleBase.
    """
    # list() is required for Python 3 compatibility: dict.keys()
    # returns a view object there, which cannot be concatenated to a
    # list with '+'.
    for key in list(self.module_arg_spec.keys()) + ['tags']:
        setattr(self, key, kwargs[key])
    self.results['check_mode'] = self.check_mode
    # add file path validation
    self.blob_client = self.get_blob_client(self.resource_group, self.storage_account_name)
    self.container_obj = self.get_container()
    if self.blob is not None:
        self.blob_obj = self.get_blob()
    if self.state == 'present':
        if not self.container_obj:
            # create the container
            self.create_container()
        elif self.container_obj and not self.blob:
            # update container attributes
            update_tags, self.container_obj['tags'] = self.update_tags(self.container_obj.get('tags'))
            if update_tags:
                self.update_container_tags(self.container_obj['tags'])
        if self.blob:
            # create, update or download blob
            if self.src and self.src_is_valid():
                if self.blob_obj and not self.force:
                    self.log("Cannot upload to {0}. Blob with that name already exists. "
                             "Use the force option".format(self.blob))
                else:
                    self.upload_blob()
            elif self.dest and self.dest_is_valid():
                self.download_blob()
            update_tags, self.blob_obj['tags'] = self.update_tags(self.blob_obj.get('tags'))
            if update_tags:
                self.update_blob_tags(self.blob_obj['tags'])
            if self.blob_content_settings_differ():
                self.update_blob_content_settings()
    elif self.state == 'absent':
        if self.container_obj and not self.blob:
            # Delete container
            if self.container_has_blobs():
                if self.force:
                    self.delete_container()
                else:
                    self.log("Cannot delete container {0}. It contains blobs. Use the force option.".format(
                        self.container))
            else:
                self.delete_container()
        elif self.container_obj and self.blob_obj:
            # Delete blob
            self.delete_blob()
    # until we sort out how we want to do this globally
    del self.results['actions']
    return self.results
def get_container(self):
    """Return a facts dict for self.container, or {} if it does not exist."""
    result = dict()
    container = None
    if self.container:
        try:
            container = self.blob_client.get_container_properties(self.container)
        except AzureMissingResourceHttpError:
            # Container absent: fall through and return empty facts.
            pass
    if container:
        result = dict(
            name=container.name,
            tags=container.metadata,
            # NOTE(review): 'last_mdoified' is a long-standing typo of
            # 'last_modified'; kept because the RETURN documentation and
            # any consumers depend on this exact key.
            last_mdoified=container.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
        )
    return result
def get_blob(self):
    """Return a facts dict for self.blob, or {} if it does not exist."""
    result = dict()
    blob = None
    if self.blob:
        try:
            blob = self.blob_client.get_blob_properties(self.container, self.blob)
        except AzureMissingResourceHttpError:
            # Blob absent: fall through and return empty facts.
            pass
    if blob:
        result = dict(
            name=blob.name,
            tags=blob.metadata,
            last_modified=blob.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
            type=blob.properties.blob_type,
            content_length=blob.properties.content_length,
            content_settings=dict(
                content_type=blob.properties.content_settings.content_type,
                content_encoding=blob.properties.content_settings.content_encoding,
                content_language=blob.properties.content_settings.content_language,
                content_disposition=blob.properties.content_settings.content_disposition,
                cache_control=blob.properties.content_settings.cache_control,
                content_md5=blob.properties.content_settings.content_md5
            )
        )
    return result
def create_container(self):
    """Create the container (honors check mode) and record results."""
    self.log('Create container %s' % self.container)
    tags = None
    if not self.blob and self.tags:
        # when a blob is present, then tags are assigned at the blob level
        tags = self.tags
    if not self.check_mode:
        try:
            self.blob_client.create_container(self.container, metadata=tags, public_access=self.public_access)
        except AzureHttpError as exc:
            self.fail("Error creating container {0} - {1}".format(self.container, str(exc)))
    self.container_obj = self.get_container()
    self.results['changed'] = True
    self.results['actions'].append('created container {0}'.format(self.container))
    self.results['container'] = self.container_obj
def upload_blob(self):
    """Upload self.src to the container as self.blob (honors check mode)."""
    content_settings = None
    # Only build a ContentSettings object if any header option was given.
    if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \
            self.cache_control or self.content_md5:
        content_settings = ContentSettings(
            content_type=self.content_type,
            content_encoding=self.content_encoding,
            content_language=self.content_language,
            content_disposition=self.content_disposition,
            cache_control=self.cache_control,
            content_md5=self.content_md5
        )
    if not self.check_mode:
        try:
            self.blob_client.create_blob_from_path(self.container, self.blob, self.src,
                                                   metadata=self.tags, content_settings=content_settings)
        except AzureHttpError as exc:
            self.fail("Error creating blob {0} - {1}".format(self.blob, str(exc)))
    self.blob_obj = self.get_blob()
    self.results['changed'] = True
    self.results['actions'].append('created blob {0} from {1}'.format(self.blob, self.src))
    self.results['container'] = self.container_obj
    self.results['blob'] = self.blob_obj
def download_blob(self):
    """Download self.blob to self.dest (honors check mode)."""
    if not self.check_mode:
        try:
            self.blob_client.get_blob_to_path(self.container, self.blob, self.dest)
        except Exception as exc:
            self.fail("Failed to download blob {0}:{1} to {2} - {3}".format(self.container,
                                                                            self.blob,
                                                                            self.dest,
                                                                            exc))
    self.results['changed'] = True
    self.results['actions'].append('downloaded blob {0}:{1} to {2}'.format(self.container,
                                                                           self.blob,
                                                                           self.dest))
    self.results['container'] = self.container_obj
    self.results['blob'] = self.blob_obj
def src_is_valid(self):
    """Validate that self.src refers to a readable file; fail() otherwise."""
    if not os.path.isfile(self.src):
        self.fail("The source path must be a file.")
    try:
        # Opening (and immediately closing) verifies read permission.
        with open(self.src, 'r'):
            pass
    except IOError:
        self.fail("Failed to access {0}. Make sure the file exists and that you have "
                  "read access.".format(self.src))
    return True
def dest_is_valid(self):
    """Validate and normalize self.dest for a blob download.

    Expands user/env vars, appends the blob name when dest is a
    directory, creates any missing directories, and returns False when
    the destination file already exists and force is not set.
    """
    if not self.check_mode:
        self.dest = os.path.expanduser(self.dest)
        self.dest = os.path.expandvars(self.dest)
        if not os.path.basename(self.dest):
            # dest is a directory
            if os.path.isdir(self.dest):
                self.log("Path is dir. Appending blob name.")
                self.dest += self.blob
            else:
                try:
                    self.log('Attempting to makedirs {0}'.format(self.dest))
                    # Fixed: was os.makddirs (AttributeError at runtime).
                    os.makedirs(self.dest)
                except (IOError, OSError) as exc:
                    # os.makedirs raises OSError; IOError kept for the
                    # original behavior (they are aliases on Python 3).
                    self.fail("Failed to create directory {0} - {1}".format(self.dest, str(exc)))
                self.dest += self.blob
        else:
            # does path exist without basename
            file_name = os.path.basename(self.dest)
            path = self.dest.replace(file_name, '')
            self.log('Checking path {0}'.format(path))
            if not os.path.isdir(path):
                try:
                    self.log('Attempting to makedirs {0}'.format(path))
                    os.makedirs(path)
                except (IOError, OSError) as exc:
                    self.fail("Failed to create directory {0} - {1}".format(path, str(exc)))
        self.log('Checking final path {0}'.format(self.dest))
        if os.path.isfile(self.dest) and not self.force:
            # dest already exists and we're not forcing
            self.log("Dest {0} already exists. Cannot download. Use the force option.".format(self.dest))
            return False
    return True
def delete_container(self):
    """Delete the container (honors check mode) and record results."""
    if not self.check_mode:
        try:
            self.blob_client.delete_container(self.container)
        except AzureHttpError as exc:
            self.fail("Error deleting container {0} - {1}".format(self.container, str(exc)))
    self.results['changed'] = True
    self.results['actions'].append('deleted container {0}'.format(self.container))
def container_has_blobs(self):
    """Return True if the container holds at least one blob."""
    try:
        list_generator = self.blob_client.list_blobs(self.container)
    except AzureHttpError as exc:
        self.fail("Error list blobs in {0} - {1}".format(self.container, str(exc)))
    if len(list_generator.items) > 0:
        return True
    return False
def delete_blob(self):
    """Delete the blob (honors check mode) and record results."""
    if not self.check_mode:
        try:
            self.blob_client.delete_blob(self.container, self.blob)
        except AzureHttpError as exc:
            self.fail("Error deleting blob {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
    self.results['changed'] = True
    self.results['actions'].append('deleted blob {0}:{1}'.format(self.container, self.blob))
    self.results['container'] = self.container_obj
def update_container_tags(self, tags):
    """Replace container metadata with *tags* and refresh facts."""
    if not self.check_mode:
        try:
            self.blob_client.set_container_metadata(self.container, metadata=tags)
        except AzureHttpError as exc:
            self.fail("Error updating container tags {0} - {1}".format(self.container, str(exc)))
    self.container_obj = self.get_container()
    self.results['changed'] = True
    self.results['actions'].append("updated container {0} tags.".format(self.container))
    self.results['container'] = self.container_obj
def update_blob_tags(self, tags):
    """Replace blob metadata with *tags* and refresh facts."""
    if not self.check_mode:
        try:
            self.blob_client.set_blob_metadata(self.container, self.blob, metadata=tags)
        except AzureHttpError as exc:
            self.fail("Update blob tags {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
    self.blob_obj = self.get_blob()
    self.results['changed'] = True
    self.results['actions'].append("updated blob {0}:{1} tags.".format(self.container, self.blob))
    self.results['container'] = self.container_obj
    self.results['blob'] = self.blob_obj
def blob_content_settings_differ(self):
    """Return True if any requested content setting differs from the
    blob's current content settings; False when no setting was given."""
    requested = dict(
        content_type=self.content_type,
        content_encoding=self.content_encoding,
        content_language=self.content_language,
        content_disposition=self.content_disposition,
        cache_control=self.cache_control,
        content_md5=self.content_md5
    )
    # No content options supplied at all -> nothing to compare.
    if not any(requested.values()):
        return False
    return self.blob_obj['content_settings'] != requested
def update_blob_content_settings(self):
    """Apply the requested content settings to the blob and refresh facts."""
    content_settings = ContentSettings(
        content_type=self.content_type,
        content_encoding=self.content_encoding,
        content_language=self.content_language,
        content_disposition=self.content_disposition,
        cache_control=self.cache_control,
        content_md5=self.content_md5
    )
    if not self.check_mode:
        try:
            self.blob_client.set_blob_properties(self.container, self.blob, content_settings=content_settings)
        except AzureHttpError as exc:
            self.fail("Update blob content settings {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
    self.blob_obj = self.get_blob()
    self.results['changed'] = True
    self.results['actions'].append("updated blob {0}:{1} content settings.".format(self.container, self.blob))
    self.results['container'] = self.container_obj
    self.results['blob'] = self.blob_obj
def main():
    """Module entry point; AzureRMModuleBase runs exec_module()."""
    AzureRMStorageBlob()

if __name__ == '__main__':
    main()
| gpl-3.0 |
testbetta/git-repo-pub | subcmds/status.py | 35 | 6501 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from command import PagedCommand
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import glob
import itertools
import os
from color import Coloring
class Status(PagedCommand):
    """'repo status' subcommand: show working-tree status per project."""

    # Listed in the common command set of 'repo help'.
    common = True
    helpSummary = "Show the working tree status"
    helpUsage = """
%prog [<project>...]
"""
    helpDescription = """
'%prog' compares the working tree to the staging area (aka index),
and the most recent commit on this branch (HEAD), in each project
specified. A summary is displayed, one line per file where there
is a difference between these three states.
The -j/--jobs option can be used to run multiple status queries
in parallel.
The -o/--orphans option can be used to show objects that are in
the working directory, but not associated with a repo project.
This includes unmanaged top-level files and directories, but also
includes deeper items. For example, if dir/subdir/proj1 and
dir/subdir/proj2 are repo projects, dir/subdir/proj3 will be shown
if it is not known to repo.
Status Display
--------------
The status display is organized into three columns of information,
for example if the file 'subcmds/status.py' is modified in the
project 'repo' on branch 'devwork':
project repo/ branch devwork
-m subcmds/status.py
The first column explains how the staging area (index) differs from
the last commit (HEAD). Its values are always displayed in upper
case and have the following meanings:
-: no difference
A: added (not in HEAD, in index )
M: modified ( in HEAD, in index, different content )
D: deleted ( in HEAD, not in index )
R: renamed (not in HEAD, in index, path changed )
C: copied (not in HEAD, in index, copied from another)
T: mode changed ( in HEAD, in index, same content )
U: unmerged; conflict resolution required
The second column explains how the working directory differs from
the index. Its values are always displayed in lower case and have
the following meanings:
-: new / unknown (not in index, in work tree )
m: modified ( in index, in work tree, modified )
d: deleted ( in index, not in work tree )
"""
def _Options(self, p):
    """Register command-line options on option parser *p*."""
    p.add_option('-j', '--jobs',
                 dest='jobs', action='store', type='int', default=2,
                 help="number of projects to check simultaneously")
    p.add_option('-o', '--orphans',
                 dest='orphans', action='store_true',
                 help="include objects in working directory outside of repo projects")
def _StatusHelper(self, project, clean_counter, sem):
    """Obtains the status for a specific project.

    Prints the project's work-tree status and advances the clean
    counter when the project is clean.  Always releases the
    semaphore when done, even if the status check raises.

    Args:
      project: Project to get status of.
      clean_counter: Counter for clean projects.
      sem: Semaphore, will call release() when complete.
    """
    try:
        state = project.PrintWorkTreeStatus()
        if state == 'CLEAN':
            next(clean_counter)
    finally:
        sem.release()
def _FindOrphans(self, dirs, proj_dirs, proj_dirs_parents, outstring):
    """find 'dirs' that are present in 'proj_dirs_parents' but not in 'proj_dirs'

    Recurses into directories that are ancestors of project paths;
    appends formatted orphan entries to *outstring*.
    """
    status_header = ' --\t'
    for item in dirs:
        if not os.path.isdir(item):
            # Plain file at this level: always an orphan.
            outstring.append(''.join([status_header, item]))
            continue
        if item in proj_dirs:
            # The directory is itself a project: managed, skip.
            continue
        if item in proj_dirs_parents:
            # Ancestor of some project: recurse to find orphans inside.
            self._FindOrphans(glob.glob('%s/.*' % item) +
                              glob.glob('%s/*' % item),
                              proj_dirs, proj_dirs_parents, outstring)
            continue
        outstring.append(''.join([status_header, item, '/']))
def Execute(self, opt, args):
    """Run the status command over the selected projects."""
    all_projects = self.GetProjects(args)
    # Counts projects reporting CLEAN status.
    counter = itertools.count()
    if opt.jobs == 1:
        # Serial path: no threads needed.
        for project in all_projects:
            state = project.PrintWorkTreeStatus()
            if state == 'CLEAN':
                next(counter)
    else:
        # Bounded parallelism: the semaphore limits concurrent checks.
        sem = _threading.Semaphore(opt.jobs)
        threads = []
        for project in all_projects:
            sem.acquire()
            t = _threading.Thread(target=self._StatusHelper,
                                  args=(project, counter, sem))
            threads.append(t)
            t.daemon = True
            t.start()
        for t in threads:
            t.join()
    # next(counter) yields the number of CLEAN projects counted so far.
    if len(all_projects) == next(counter):
        print('nothing to commit (working directory clean)')
    if opt.orphans:
        # Build the set of project paths and all their parent dirs so
        # _FindOrphans can tell managed trees from strays.
        proj_dirs = set()
        proj_dirs_parents = set()
        for project in self.GetProjects(None, missing_ok=True):
            proj_dirs.add(project.relpath)
            (head, _tail) = os.path.split(project.relpath)
            while head != "":
                proj_dirs_parents.add(head)
                (head, _tail) = os.path.split(head)
        proj_dirs.add('.repo')

        class StatusColoring(Coloring):
            # Local coloring scheme for orphan output.
            def __init__(self, config):
                Coloring.__init__(self, config, 'status')
                self.project = self.printer('header', attr='bold')
                self.untracked = self.printer('untracked', fg='red')

        orig_path = os.getcwd()
        try:
            os.chdir(self.manifest.topdir)
            outstring = []
            self._FindOrphans(glob.glob('.*') +
                              glob.glob('*'),
                              proj_dirs, proj_dirs_parents, outstring)
            if outstring:
                output = StatusColoring(self.manifest.globalConfig)
                output.project('Objects not within a project (orphans)')
                output.nl()
                for entry in outstring:
                    output.untracked(entry)
                    output.nl()
            else:
                print('No orphan files or directories')
        finally:
            # Restore CWD.
            os.chdir(orig_path)
| apache-2.0 |
Huyuwei/tvm | tests/python/unittest/test_lang_data_layout.py | 2 | 3014 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout and bijective-layout node"""
import tvm
from topi.util import get_const_tuple
def test_layout():
    """Exercise the basic Layout accessors on a split-channel layout."""
    layout = tvm.layout("NCHW16c")
    assert layout is not None
    assert isinstance(layout, tvm.tensor.Layout)

    # 'c' is the 16-wide inner split of 'C'; both report factor 16,
    # an unsplit axis reports -1.
    for axis in ("c", "C"):
        assert layout.factor_of(axis) == 16
    assert layout.factor_of("N") == -1

    # Positional index of each axis; an unknown axis reports -1.
    axis_order = "NCHWc"
    for pos, axis in enumerate(axis_order):
        assert layout.index_of(axis) == pos
    assert layout.index_of("O") == -1

    # Membership mirrors index_of().
    for axis in axis_order:
        assert axis in layout
    assert "O" not in layout

    # Subscripting returns the axis name; negative indices wrap.
    for pos, axis in enumerate(axis_order):
        assert layout[pos] == axis
    assert layout[-1] == "c"
def test_bilayout_convertible():
    """bijective_layout() returns None for unconvertible layout pairs."""
    non_convertible = [
        ("NCHW", "ABCD"),
        ("__undef__", "NCHW"),
        ("NCHW", "__undef__"),
        ("__undef__", "__undef__"),
        ("", "NCHW"),
        ("NCHW", ""),
        ("", ""),
    ]
    for src, dst in non_convertible:
        assert tvm.bijective_layout(src, dst) is None
    # Splitting the C axis into blocks of 16 is a legal conversion.
    assert tvm.bijective_layout("NCHW", "NCHW16c") is not None
def test_bilayout_shape():
    """Forward/backward shape mapping round-trips through NCHW16c."""
    bilayout = tvm.bijective_layout("NCHW", "NCHW16c")
    assert isinstance(bilayout, tvm.tensor.BijectiveLayout)
    # C=32 splits into 2 blocks of 16 channels.
    forward = bilayout.forward_shape((1, 32, 7, 7))
    assert get_const_tuple(forward) == (1, 2, 7, 7, 16)
    # Backward inverts the forward mapping exactly.
    assert get_const_tuple(bilayout.backward_shape(forward)) == (1, 32, 7, 7)
def test_bilayout_index():
    """Forward/backward element-index mapping round-trips."""
    bilayout = tvm.bijective_layout("NCHW", "NCHW16c")
    # Channel 18 maps to block 1, sub-channel 2 (18 == 1*16 + 2).
    forward = bilayout.forward_index([0, 18, 6, 6])
    assert get_const_tuple(forward) == (0, 1, 6, 6, 2)
    backward = bilayout.backward_index([0, 1, 6, 6, 2])
    assert get_const_tuple(backward) == (0, 18, 6, 6)
if __name__ == "__main__":
    # Allow running this file directly, without a test runner.
    test_layout()
    test_bilayout_convertible()
    test_bilayout_shape()
    test_bilayout_index()
| apache-2.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/django/contrib/sessions/backends/cache.py | 122 | 2505 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import get_cache
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
    """
    A cache-based session store.
    """
    def __init__(self, session_key=None):
        # Bind the configured cache before SessionBase initialization.
        self._cache = get_cache(settings.SESSION_CACHE_ALIAS)
        super(SessionStore, self).__init__(session_key)

    @property
    def cache_key(self):
        # Namespace the session key so it cannot collide with other
        # cache users.
        return KEY_PREFIX + self._get_or_create_session_key()

    def load(self):
        try:
            data = self._cache.get(self.cache_key, None)
        except Exception:
            # Some backends (e.g. memcache) raise on invalid cache keys;
            # treat that the same as a missing session. See #17810.
            data = None
        if data is None:
            # Unknown or expired session: start a fresh, empty one.
            self.create()
            return {}
        return data

    def create(self):
        # A cache can fail silently (e.g. memcache), so a failed add()
        # may mean either a key collision or an unavailable cache.
        # Retry many times before concluding the cache is down.
        for _attempt in xrange(10000):
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                # Key collision: pick another key and retry.
                continue
            else:
                self.modified = True
                return
        raise RuntimeError(
            "Unable to create a new session key. "
            "It is likely that the cache is unavailable.")

    def save(self, must_create=False):
        # add() fails if the key exists; set() overwrites unconditionally.
        setter = self._cache.add if must_create else self._cache.set
        stored = setter(self.cache_key,
                        self._get_session(no_load=must_create),
                        self.get_expiry_age())
        if must_create and not stored:
            raise CreateError

    def exists(self, session_key):
        return (KEY_PREFIX + session_key) in self._cache

    def delete(self, session_key=None):
        if session_key is None:
            if self.session_key is None:
                # Nothing to delete for an unsaved session.
                return
            session_key = self.session_key
        self._cache.delete(KEY_PREFIX + session_key)

    @classmethod
    def clear_expired(cls):
        # The cache backend expires entries on its own; nothing to do.
        pass
| mit |
mpasternak/pyglet-fix-issue-552 | tests/graphics/IMMEDIATE.py | 18 | 2241 | #!/usr/bin/python
# $Id:$
import unittest
import pyglet
from graphics_common import *
__noninteractive = True
class TEST_CASE(unittest.TestCase):
    """Immediate-mode draw tests: render via pyglet.graphics.draw and
    verify the vertices/colors/tex-coords recovered through the OpenGL
    feedback buffer against the source data arrays."""

    def check(self, expected, result, dimensions):
        """Compare a source array against feedback data.

        'expected' holds 'dimensions' components per vertex; the
        feedback 'result' always holds 4 components per vertex.
        """
        # NOTE(review): under Python 2 this is integer division; if
        # len(result)*dimensions is not a multiple of 4 the comparison
        # silently truncates -- confirm intended.
        if len(expected) != len(result) * dimensions / 4:
            self.fail('Incorrect number of vertices in feedback array')
        for d in range(2): # Don't check Z or W
            # Walk the d-th component of each vertex in both arrays.
            for e, r in zip(expected[d::dimensions], result[d::4]):
                if abs(e - r) > 0.01:
                    self.fail('Feedback array is in error: %r, %r' % \
                        (e, r))

    def generic_test(self, v_fmt, v_data,
                     c_fmt=None, c_data=None,
                     t_fmt=None, t_data=None):
        """Draw with the given vertex/color/texcoord formats and check
        each supplied attribute array via the feedback buffer.

        Format strings are pyglet attribute specs like 'v3f': the
        character at index 1 is the component count.
        """
        data = [(v_fmt, v_data)]
        n_v = int(v_fmt[1])
        if c_fmt:
            data.append((c_fmt, c_data))
            n_c = int(c_fmt[1])
        if t_fmt:
            data.append((t_fmt, t_data))
            n_t = int(t_fmt[1])
        # get_feedback() (from graphics_common) runs the draw call and
        # returns what OpenGL actually processed.
        vertices, colors, tex_coords = get_feedback(lambda: \
            pyglet.graphics.draw(n_vertices, GL_TRIANGLES, *data))
        self.check(v_data, vertices, n_v)
        if c_fmt:
            self.check(c_data, colors, n_c)
        if t_fmt:
            self.check(t_data, tex_coords, n_t)

    # One test per supported attribute-format combination; the data
    # arrays come from graphics_common.
    def test_v2f(self):
        self.generic_test('v2f', v2f_data)

    def test_v3f(self):
        self.generic_test('v3f', v3f_data)

    def test_v2f_c3f(self):
        self.generic_test('v2f', v2f_data, 'c3f', c3f_data)

    def test_v2f_c4f(self):
        self.generic_test('v2f', v2f_data, 'c4f', c4f_data)

    def test_v3f_c3f(self):
        self.generic_test('v3f', v3f_data, 'c3f', c3f_data)

    def test_v3f_c4f(self):
        self.generic_test('v3f', v3f_data, 'c4f', c4f_data)

    def test_v2f_t2f(self):
        self.generic_test('v2f', v2f_data, None, None, 't2f', t2f_data)

    def test_v3f_c3f_t2f(self):
        self.generic_test('v3f', v3f_data, 'c3f', c3f_data, 't2f', t2f_data)

    def test_v3f_c3f_t3f(self):
        self.generic_test('v3f', v3f_data, 'c3f', c3f_data, 't3f', t3f_data)

    def test_v3f_c4f_t4f(self):
        self.generic_test('v3f', v3f_data, 'c4f', c4f_data, 't4f', t4f_data)
if __name__ == '__main__':
    # Standard unittest entry point for direct invocation.
    unittest.main()
| bsd-3-clause |
oseledets/pybtex | pybtex/docgen/mystyle.py | 5 | 2874 | # Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class MyHiglightStyle(Style):
    """
    Port of the default trac highlighter design.
    """
    # NOTE(review): the class name is missing an 'h' ("Higlight");
    # kept as-is because renaming would break importers of this module.

    default_style = ''

    # Token -> Pygments style-string mapping.  Commented-out entries are
    # retained from the original trac design for reference.
    styles = {
        Comment:                'italic #999988',
#        Comment.Preproc:        'bold noitalic #999999',
#        Comment.Special:        'bold #999999',

        Operator:               'bold',

        String:                 '#B81',
        String.Escape:          '#900',
#        String.Regex:           '#808000',

        Number:                 '#590 bold',

        Keyword:                'bold',
#        Keyword.Type:           '#445588',

        Name.Builtin:           '#840',
        Name.Function:          'bold #840',
        Name.Class:             'bold #900',
        Name.Exception:         'bold #A00',
        Name.Decorator:         '#840',
        Name.Namespace:         '#900',
#        Name.Variable:          '#088',
#        Name.Constant:          '#088',
        Name.Tag:               '#840',
#        Name.Tag:               '#000080',
#        Name.Attribute:         '#008080',
#        Name.Entity:            '#800080',

#        Generic.Heading:        '#999999',
#        Generic.Subheading:     '#aaaaaa',
#        Generic.Deleted:        'bg:#ffdddd #000000',
#        Generic.Inserted:       'bg:#ddffdd #000000',
        Generic.Error:          '#aa0000',
        Generic.Emph:           'italic',
        Generic.Strong:         'bold',
        Generic.Prompt:         '#555555',
        Generic.Output:         '#888888',
        Generic.Traceback:      '#aa0000',
        Error:                  'bg:#e3d2d2 #a61717'
    }
| mit |
pculture/django-uploadtemplate | uploadtemplate/tests/unit/test_forms.py | 1 | 4092 | import zipfile
from django.core.exceptions import ValidationError
import mock
from uploadtemplate.forms import ThemeForm
from uploadtemplate.models import Theme
from uploadtemplate.tests import BaseTestCase
from uploadtemplate.utils import is_zipfile
class ThemeFormTestCase(BaseTestCase):
    """Validation and save behavior of ThemeForm, driven by fixture zip
    files under tests/data/zips/ (via BaseTestCase._data_file)."""

    def test_name_not_given(self):
        # A theme name is required.
        form = ThemeForm({'name': '',
                          'theme_files_zip': None})
        self.assertFalse(form.is_valid())

    def test_zip_not_given(self):
        # The zip upload itself is optional; missing file cleans to None.
        form = ThemeForm({'name': 'theme',
                          'theme_files_zip': None})
        self.assertTrue(form.is_valid())
        self.assertTrue(form.cleaned_data['theme_files_zip'] is None)

    def test_clean_zip__invalid(self):
        # A file that is not a zip archive is rejected outright.
        form = ThemeForm()
        data_file = self._data_file('zips/invalid.zip')
        form.cleaned_data = {'theme_files_zip': data_file}
        self.assertRaisesMessage(ValidationError,
                                 'Must be a valid zip archive.',
                                 form.clean_theme_files_zip)
        data_file.close()

    def test_clean_zip__empty(self):
        # An archive with no members is rejected; which error message we
        # get depends on whether this Python can open the file at all.
        form = ThemeForm()
        data_file = self._data_file('zips/empty.zip')
        form.cleaned_data = {'theme_files_zip': data_file}
        # Python 2.6 can't actually open this empty file because of a bug.
        if is_zipfile(data_file):
            zip_file = zipfile.ZipFile(data_file)
            self.assertEqual(len(zip_file.namelist()), 0)
            zip_file.close()
            self.assertRaisesMessage(ValidationError,
                                     'Zip archive cannot be empty.',
                                     form.clean_theme_files_zip)
        else:
            self.assertRaisesMessage(ValidationError,
                                     'Must be a valid zip archive.',
                                     form.clean_theme_files_zip)
        data_file.close()

    def test_clean_zip__evil_root(self):
        # Absolute member paths (path traversal to filesystem root) are
        # rejected.
        form = ThemeForm()
        data_file = self._data_file('zips/evil_root.zip')
        form.cleaned_data = {'theme_files_zip': data_file}
        zip_file = zipfile.ZipFile(data_file, 'r')
        namelist = zip_file.namelist()
        zip_file.close()
        # Sanity-check the fixture before asserting on form behavior.
        self.assertEqual(len(namelist), 1)
        self.assertTrue(namelist[0].startswith('/'))
        self.assertRaisesMessage(ValidationError,
                                 'Zip archive contains invalid names.',
                                 form.clean_theme_files_zip)
        data_file.close()

    def test_clean_zip__evil_relative(self):
        # '..' path components (relative path traversal) are rejected.
        form = ThemeForm()
        data_file = self._data_file('zips/evil_relative.zip')
        form.cleaned_data = {'theme_files_zip': data_file}
        zip_file = zipfile.ZipFile(data_file, 'r')
        namelist = zip_file.namelist()
        zip_file.close()
        # Sanity-check the fixture before asserting on form behavior.
        self.assertEqual(len(namelist), 1)
        self.assertTrue('..' in namelist[0].split('/'))
        self.assertRaisesMessage(ValidationError,
                                 'Zip archive contains invalid names.',
                                 form.clean_theme_files_zip)
        data_file.close()

    def test_clean_zip__valid(self):
        # A well-formed archive passes through cleaning unchanged.
        form = ThemeForm()
        data_file = self._data_file('zips/theme.zip')
        form.cleaned_data = {'theme_files_zip': data_file}
        self.assertEqual(form.clean_theme_files_zip(), data_file)
        data_file.close()

    def test_save(self):
        # Saving a valid form unpacks the files (save_files), prunes
        # stale ones (prune_files), and assigns the theme to a site.
        data_file = self._data_file('zips/theme.zip')
        with mock.patch.object(Theme, 'save_files') as save_files:
            with mock.patch.object(Theme, 'prune_files') as prune_files:
                form = ThemeForm({'name': 'Theme',
                                  'theme_files_zip': data_file})
                self.assertTrue(form.is_valid())
                self.assertTrue(form.instance.site_id is None)
                form.save()
                save_files.assert_called_once_with()
                prune_files.assert_called_once_with()
                self.assertFalse(form.instance.site_id is None)
        data_file.close()
| bsd-3-clause |
vjmac15/Lyilis | lib/youtube_dl/extractor/gdcvault.py | 27 | 6692 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
HEADRequest,
sanitized_Request,
urlencode_postdata,
)
class GDCVaultIE(InfoExtractor):
    """Extractor for gdcvault.com talk recordings.

    Handles three page variants: direct flash-player URLs, the
    iframe-based XML player (delegated to DigitallySpeaking), and
    login-protected talks (credentials via .netrc machine 'gdcvault').
    """
    _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)?'
    _NETRC_MACHINE = 'gdcvault'
    _TESTS = [
        {
            'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
            'md5': '7ce8388f544c88b7ac11c7ab1b593704',
            'info_dict': {
                'id': '1019721',
                'display_id': 'Doki-Doki-Universe-Sweet-Simple',
                'ext': 'mp4',
                'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
            }
        },
        {
            'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
            'info_dict': {
                'id': '1015683',
                'display_id': 'Embracing-the-Dark-Art-of',
                'ext': 'flv',
                'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
            },
            'params': {
                'skip_download': True,  # Requires rtmpdump
            }
        },
        {
            'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
            'md5': 'a5eb77996ef82118afbbe8e48731b98e',
            'info_dict': {
                'id': '1015301',
                'display_id': 'Thexder-Meets-Windows-95-or',
                'ext': 'flv',
                'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
            },
            'skip': 'Requires login',
        },
        {
            'url': 'http://gdcvault.com/play/1020791/',
            'only_matching': True,
        },
        {
            # Hard-coded hostname
            'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface',
            'md5': 'a8efb6c31ed06ca8739294960b2dbabd',
            'info_dict': {
                'id': '1023460',
                'ext': 'mp4',
                'display_id': 'Tenacious-Design-and-The-Interface',
                'title': 'Tenacious Design and The Interface of \'Destiny\'',
            },
        },
        {
            # Multiple audios
            'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC',
            'info_dict': {
                'id': '1014631',
                'ext': 'flv',
                'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man',
            },
            'params': {
                'skip_download': True,  # Requires rtmpdump
                'format': 'jp',  # The japanese audio
            }
        },
        {
            # gdc-player.html
            'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo',
            'info_dict': {
                'id': '1435',
                'display_id': 'An-American-engine-in-Tokyo',
                'ext': 'flv',
                'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT',
            },
            'params': {
                'skip_download': True,  # Requires rtmpdump
            },
        },
    ]

    def _login(self, webpage_url, display_id):
        """Log in with the configured credentials and return the
        authenticated video page, or None when no credentials are set."""
        (username, password) = self._get_login_info()
        if username is None or password is None:
            self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
            return None

        # Derive login/logout endpoints from the requested URL's root so
        # both gdcvault.com and www.gdcvault.com work.
        mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
        login_url = mobj.group('root_url') + 'api/login.php'
        logout_url = mobj.group('root_url') + 'logout'

        login_form = {
            'email': username,
            'password': password,
        }

        request = sanitized_Request(login_url, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(request, display_id, 'Logging in')
        # Fetch the page while authenticated, then log out again so the
        # session is not left open server-side.
        start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
        self._download_webpage(logout_url, display_id, 'Logging out')

        return start_page

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        display_id = mobj.group('name') or video_id

        webpage_url = 'http://www.gdcvault.com/play/' + video_id
        start_page = self._download_webpage(webpage_url, display_id)

        # Variant 1: page embeds a direct file URL in the flash player
        # setup; resolve redirects so the extension is detected right.
        direct_url = self._search_regex(
            r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
            start_page, 'url', default=None)
        if direct_url:
            title = self._html_search_regex(
                r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
                start_page, 'title')
            video_url = 'http://www.gdcvault.com' + direct_url
            # resolve the url so that we can detect the correct extension
            head = self._request_webpage(HEADRequest(video_url), video_id)
            video_url = head.geturl()

            return {
                'id': video_id,
                'display_id': display_id,
                'url': video_url,
                'title': title,
            }

        # Variant 2: iframe-based XML player; may require logging in
        # before the iframe appears in the page.
        PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>'

        xml_root = self._html_search_regex(
            PLAYER_REGEX, start_page, 'xml root', default=None)
        if xml_root is None:
            # Probably need to authenticate
            login_res = self._login(webpage_url, display_id)
            if login_res is None:
                self.report_warning('Could not login.')
            else:
                start_page = login_res
                # Grab the url from the authenticated page
                xml_root = self._html_search_regex(
                    PLAYER_REGEX, start_page, 'xml root')

        xml_name = self._html_search_regex(
            r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
            start_page, 'xml filename', default=None)
        if xml_name is None:
            # Fallback to the older format
            xml_name = self._html_search_regex(
                r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>',
                start_page, 'xml filename')

        # Delegate the actual media extraction to DigitallySpeaking.
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'display_id': display_id,
            'url': '%s/xml/%s' % (xml_root, xml_name),
            'ie_key': 'DigitallySpeaking',
        }
| gpl-3.0 |
ChristianKniep/QNIB | serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/tests/test_convert_numpy.py | 44 | 6312 | from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_equal
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
    """Round-trip tests between networkx graphs and numpy matrices/arrays.

    Uses legacy nose attributes and deprecated numpy scalar aliases
    (np.float, np.bool, ...); runs only where numpy is importable.
    """
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        # Import numpy lazily so the whole class can be skipped when it
        # is unavailable.
        global np
        global np_assert_equal
        try:
            import numpy as np
            np_assert_equal=np.testing.assert_equal
        except ImportError:
            raise SkipTest('NumPy not available.')

    def __init__(self):
        # Fixture graphs: unweighted undirected/directed, and weighted
        # undirected/directed cycles.
        self.G1 = barbell_graph(10, 3)
        self.G2 = cycle_graph(10, create_using=nx.DiGraph())

        self.G3 = self.create_weighted(nx.Graph())
        self.G4 = self.create_weighted(nx.DiGraph())

    def create_weighted(self, G):
        # 4-cycle with edge weight = source node + 10.
        g = cycle_graph(4)
        e = g.edges()
        source = [u for u,v in e]
        dest = [v for u,v in e]
        weight = [s+10 for s in source]
        ex = zip(source, dest, weight)
        G.add_weighted_edges_from(ex)
        return G

    def assert_equal(self, G1, G2):
        # Structural equality: same node and edge sets.
        assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
        assert_true( sorted(G1.edges())==sorted(G2.edges()) )

    def identity_conversion(self, G, A, create_using):
        # A graph rebuilt from its matrix must equal the original, via
        # all three construction paths.
        GG = nx.from_numpy_matrix(A, create_using=create_using)
        self.assert_equal(G, GG)
        GW = nx.to_networkx_graph(A, create_using=create_using)
        self.assert_equal(G, GW)
        GI = create_using.__class__(A)
        self.assert_equal(G, GI)

    def test_shape(self):
        "Conversion from non-square array."
        A=np.array([[1,2,3],[4,5,6]])
        assert_raises(nx.NetworkXError, nx.from_numpy_matrix, A)

    def test_identity_graph_matrix(self):
        "Conversion from graph to matrix to graph."
        A = nx.to_numpy_matrix(self.G1)
        self.identity_conversion(self.G1, A, nx.Graph())

    def test_identity_graph_array(self):
        "Conversion from graph to array to graph."
        A = nx.to_numpy_matrix(self.G1)
        A = np.asarray(A)
        self.identity_conversion(self.G1, A, nx.Graph())

    def test_identity_digraph_matrix(self):
        """Conversion from digraph to matrix to digraph."""
        A = nx.to_numpy_matrix(self.G2)
        self.identity_conversion(self.G2, A, nx.DiGraph())

    def test_identity_digraph_array(self):
        """Conversion from digraph to array to digraph."""
        A = nx.to_numpy_matrix(self.G2)
        A = np.asarray(A)
        self.identity_conversion(self.G2, A, nx.DiGraph())

    def test_identity_weighted_graph_matrix(self):
        """Conversion from weighted graph to matrix to weighted graph."""
        A = nx.to_numpy_matrix(self.G3)
        self.identity_conversion(self.G3, A, nx.Graph())

    def test_identity_weighted_graph_array(self):
        """Conversion from weighted graph to array to weighted graph."""
        A = nx.to_numpy_matrix(self.G3)
        A = np.asarray(A)
        self.identity_conversion(self.G3, A, nx.Graph())

    def test_identity_weighted_digraph_matrix(self):
        """Conversion from weighted digraph to matrix to weighted digraph."""
        A = nx.to_numpy_matrix(self.G4)
        self.identity_conversion(self.G4, A, nx.DiGraph())

    def test_identity_weighted_digraph_array(self):
        """Conversion from weighted digraph to array to weighted digraph."""
        A = nx.to_numpy_matrix(self.G4)
        A = np.asarray(A)
        self.identity_conversion(self.G4, A, nx.DiGraph())

    def test_nodelist(self):
        """Conversion from graph to matrix to graph with nodelist."""
        P4 = path_graph(4)
        P3 = path_graph(3)
        nodelist = P3.nodes()
        A = nx.to_numpy_matrix(P4, nodelist=nodelist)
        GA = nx.Graph(A)
        self.assert_equal(GA, P3)

        # Make nodelist ambiguous by containing duplicates.
        nodelist += [nodelist[0]]
        assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)

    def test_weight_keyword(self):
        # weight=None ignores weights; a named attribute selects it.
        WP4 = nx.Graph()
        WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) )
        P4 = path_graph(4)
        A = nx.to_numpy_matrix(P4)
        np_assert_equal(A, nx.to_numpy_matrix(WP4,weight=None))
        np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
        np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4,weight='other'))

    def test_from_numpy_matrix_type(self):
        # The matrix dtype determines the Python type of edge weights;
        # object dtype is rejected.
        A=np.matrix([[1]])
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),int)

        A=np.matrix([[1]]).astype(np.float)
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),float)

        A=np.matrix([[1]]).astype(np.str)
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),str)

        A=np.matrix([[1]]).astype(np.bool)
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),bool)

        A=np.matrix([[1]]).astype(np.complex)
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),complex)

        A=np.matrix([[1]]).astype(np.object)
        assert_raises(TypeError,nx.from_numpy_matrix,A)

    def test_from_numpy_matrix_dtype(self):
        # Structured dtypes map each field to an edge attribute.
        dt=[('weight',float),('cost',int)]
        A=np.matrix([[(1.0,2)]],dtype=dt)
        G=nx.from_numpy_matrix(A)
        assert_equal(type(G[0][0]['weight']),float)
        assert_equal(type(G[0][0]['cost']),int)
        assert_equal(G[0][0]['cost'],2)
        assert_equal(G[0][0]['weight'],1.0)

    def test_to_numpy_recarray(self):
        G=nx.Graph()
        G.add_edge(1,2,weight=7.0,cost=5)
        A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
        assert_equal(sorted(A.dtype.names),['cost','weight'])
        assert_equal(A.weight[0,1],7.0)
        assert_equal(A.weight[0,0],0.0)
        assert_equal(A.cost[0,1],5)
        assert_equal(A.cost[0,0],0)

    def test_numpy_multigraph(self):
        # Parallel edges are combined by multigraph_weight (sum/min/max).
        G=nx.MultiGraph()
        G.add_edge(1,2,weight=7)
        G.add_edge(1,2,weight=70)
        A=nx.to_numpy_matrix(G)
        assert_equal(A[1,0],77)
        A=nx.to_numpy_matrix(G,multigraph_weight=min)
        assert_equal(A[1,0],7)
        A=nx.to_numpy_matrix(G,multigraph_weight=max)
        assert_equal(A[1,0],70)
| gpl-2.0 |
stone5495/NewsBlur | utils/feedfinder.py | 17 | 13464 | """feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('delong.typepad.com/sdj/')
['http://delong.typepad.com/sdj/atom.xml',
 'http://delong.typepad.com/sdj/index.rdf',
 'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.371"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib, urllib, urlparse, re, sys, robotparser
import requests
from StringIO import StringIO
from lxml import etree
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
xmlrpclib = None
# Vestigial compatibility shim for Python < 2.2, where dict() did not
# exist.  On every modern interpreter the builtin 'dict' type is truthy,
# so this branch never executes; it is kept only for historical fidelity.
if not dict:
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
def _debuglog(message):
    # Debug tracing to stdout, gated by the module-level _debug flag
    # (settable via the --debug command-line switch).
    if _debug: print message
class URLGatekeeper:
    """a class to track robots.txt rules across multiple servers"""
    def __init__(self):
        self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
        self.urlopener = urllib.FancyURLopener()
        # Identify ourselves with a browser-like User-Agent; also pushed
        # onto robotparser's opener so robots.txt rules match this agent.
        self.urlopener.version = "NewsBlur Feed Finder (Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 Safari/534.48.3)"
        _debuglog(self.urlopener.version)
        self.urlopener.addheaders = [('User-Agent', self.urlopener.version)]
        # self.urlopener.addheaders = [('User-Agent', self.urlopener.version), ('Accept', '*')]
        robotparser.URLopener.version = self.urlopener.version
        robotparser.URLopener.addheaders = self.urlopener.addheaders

    def _getrp(self, url):
        """Return the (cached) robots.txt parser for url's domain."""
        protocol, domain = urlparse.urlparse(url)[:2]
        if self.rpcache.has_key(domain):
            return self.rpcache[domain]
        baseurl = '%s://%s' % (protocol, domain)
        robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
        _debuglog('fetching %s' % robotsurl)
        rp = robotparser.RobotFileParser(robotsurl)
        try:
            rp.read()
        except:
            # Best effort: an unreadable robots.txt permits everything.
            pass
        self.rpcache[domain] = rp
        return rp

    def can_fetch(self, url):
        """True if robots.txt allows this agent to fetch url."""
        rp = self._getrp(url)
        allow = rp.can_fetch(self.urlopener.version, url)
        _debuglog("gatekeeper of %s says %s" % (url, allow))
        return allow

    def get(self, url, check=False):
        """Fetch url's body; '' on any failure (or when disallowed and
        check=True)."""
        if check and not self.can_fetch(url): return ''
        try:
            return requests.get(url, headers=dict(self.urlopener.addheaders)).content
        except:
            # Network errors degrade to an empty document.
            return ''
class BaseParser(sgmllib.SGMLParser):
    """Common base for the link-collecting SGML parsers.

    Tracks the document base URI (honoring <base href>) and accumulates
    discovered links in self.links.
    """
    def __init__(self, baseuri):
        sgmllib.SGMLParser.__init__(self)
        self.links = []
        self.baseuri = baseuri

    def normalize_attrs(self, attrs):
        """Return attrs with names lower-cased, values entity-decoded and
        stripped, empty values dropped, and rel/type values lower-cased."""
        def cleanattr(v):
            # Decode numeric character references (&#NNN;).
            v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
            if not v: return
            v = v.strip()
            # Decode the predefined XML entities.  (This chain had been
            # corrupted into no-op self-replacements by an earlier
            # HTML-unescaping of the source; restored here.)
            v = v.replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'").replace('&quot;', '"').replace('&amp;', '&')
            return v
        attrs = [(k.lower(), cleanattr(v)) for k, v in attrs if cleanattr(v)]
        attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs if cleanattr(v)]
        return attrs

    def do_base(self, attrs):
        # <base href="..."> overrides the document base URI.
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.baseuri = attrsD['href']

    def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
    """Collects feed URIs from <link rel="alternate"> autodiscovery tags."""
    # MIME types that mark a <link rel="alternate"> as a syndication feed.
    FEED_TYPES = ('application/rss+xml',
                  'text/xml',
                  'application/atom+xml',
                  'application/x.atom+xml',
                  'application/x-atom+xml')
    def do_link(self, attrs):
        # Accept only rel="alternate" links with a feed MIME type and an
        # href; resolve the href against the document base URI.
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('rel'): return
        rels = attrsD['rel'].split()
        if 'alternate' not in rels: return
        if attrsD.get('type') not in self.FEED_TYPES: return
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
class ALinkParser(BaseParser):
    """Collects every <a href> on a page, resolved against the base URI."""
    def start_a(self, attrs):
        attrsD = dict(self.normalize_attrs(attrs))
        if not attrsD.has_key('href'): return
        self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
def makeFullURI(uri):
    """Normalize *uri* to an absolute http(s) URI.

    Empty input yields None; 'feed://' becomes 'http://'; anything not
    already http(s) gets an 'http://' prefix.
    """
    if not uri:
        return None
    uri = uri.strip()
    if uri.startswith('feed://'):
        uri = 'http://' + uri.split('feed://', 1).pop()
    if uri.startswith('http://') or uri.startswith('https://'):
        return uri
    return 'http://%s' % uri
def getLinks(data, baseuri):
    """Parse HTML with the tolerant SGML parser and return autodiscovered
    feed URIs from <link rel="alternate"> tags."""
    parser = LinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getLinksLXML(data, baseuri):
    """Fallback feed-link discovery using lxml's recovering HTML parser.

    Used when the stricter sgmllib-based getLinks() finds nothing.
    Returns absolute URIs, resolved against 'baseuri' like getLinks().
    """
    parser = etree.HTMLParser(recover=True)
    tree = etree.parse(StringIO(data), parser)
    links = []
    for link in tree.findall('.//link'):
        if link.attrib.get('type') in LinkParser.FEED_TYPES:
            href = link.attrib['href']
            # Resolve relative hrefs so callers can fetch them directly;
            # previously 'baseuri' was ignored and a relative href could
            # never pass the isFeed() filter in feeds().
            if href:
                links.append(urlparse.urljoin(baseuri, href))
    return links
def getALinks(data, baseuri):
    """Return every <a href> on the page, resolved against 'baseuri'."""
    parser = ALinkParser(baseuri)
    parser.feed(data)
    return parser.links
def getLocalLinks(links, baseuri):
    """Return the subset of 'links' hosted under 'baseuri'
    (case-insensitive prefix match)."""
    if not baseuri:
        return []
    prefix = baseuri.lower()
    local = []
    for link in links:
        try:
            matches = link.lower().startswith(prefix)
        except (AttributeError, UnicodeDecodeError):
            # Skip non-string entries and undecodable byte strings.
            continue
        if matches:
            local.append(link)
    return local
def isFeedLink(link):
    """Return True if 'link' ends with a known feed filename extension.

    Uses endswith() with a suffix tuple: the previous four-character
    slice (link[-4:]) could never equal the five-character '.atom', so
    Atom links were silently missed.
    """
    return link.lower().endswith(('.rss', '.rdf', '.xml', '.atom'))
def isXMLRelatedLink(link):
    """Score how feed-like 'link' appears: the total number of
    occurrences of feed-related tokens (0 means not at all)."""
    lowered = link.lower()
    return sum(lowered.count(token) for token in ('rss', 'rdf', 'xml', 'atom'))
r_brokenRedirect = re.compile('<newLocation[^>]*>(.*?)</newLocation>', re.S)
def tryBrokenRedirect(data):
    """Return the target of a Radio-UserLand-style <newLocation>
    redirect embedded in 'data', or None if there is none."""
    if '<newLocation' not in data:
        return None
    targets = r_brokenRedirect.findall(data)
    if targets and targets[0]:
        return targets[0].strip()
    return None
def couldBeFeedData(data):
    """Heuristic feed sniff: 0 for anything containing an <html> tag,
    otherwise the number of feed root tags found."""
    lowered = data.lower()
    if '<html' in lowered:
        return 0
    return sum(lowered.count(tag) for tag in ('<rss', '<rdf', '<feed'))
def isFeed(uri):
    """Fetch 'uri' and return a truthy feed score (0/False otherwise).

    Only http(s) URIs are considered; the body is sniffed with
    couldBeFeedData().
    """
    _debuglog('seeing if %s is a feed' % uri)
    protocol = urlparse.urlparse(uri)
    if protocol[0] not in ('http', 'https'): return 0
    try:
        # robots.txt is deliberately not consulted here (check=False).
        data = _gatekeeper.get(uri, check=False)
    except (KeyError, UnicodeDecodeError):
        return False
    count = couldBeFeedData(data)
    return count
def sortFeeds(feed1Info, feed2Info):
    # Python 2 cmp-style comparator: orders Syndic8 feed-info dicts by
    # descending headlines_rank (most popular first).
    return cmp(feed2Info['headlines_rank'], feed1Info['headlines_rank'])
def getFeedsFromSyndic8(uri):
    """Query the Syndic8 XML-RPC service for feeds matching 'uri'.

    Returns data URLs of 'Syndicated' feeds, most popular first.
    Deliberately best-effort: any failure (no xmlrpclib, network error,
    bad response) yields an empty list.
    """
    feeds = []
    try:
        server = xmlrpclib.Server('http://www.syndic8.com/xmlrpc.php')
        feedids = server.syndic8.FindFeeds(uri)
        infolist = server.syndic8.GetFeedInfo(feedids, ['headlines_rank','status','dataurl'])
        # Sort by popularity so the best match comes first.
        infolist.sort(sortFeeds)
        feeds = [f['dataurl'] for f in infolist if f['status']=='Syndicated']
        _debuglog('found %s feeds through Syndic8' % len(feeds))
    except:
        pass
    return feeds
def feeds(uri, all=False, querySyndic8=False, _recurs=None):
    """Return feed URIs discovered for 'uri'.

    Tries, in order: the URI itself being a feed; broken <newLocation>
    redirects; LINK-tag autodiscovery (sgmllib then lxml); <A> links on
    the same then other servers; common feed filenames; and optionally
    Syndic8.  With all=True every stage runs and results accumulate.

    Args:
      uri: page or feed address (scheme optional).
      all: collect candidates from every stage instead of stopping at
        the first stage that finds something.
      querySyndic8: also consult the Syndic8 directory as a last resort.
      _recurs: internal recursion guard -- URIs already visited while
        following redirects; do not pass explicitly.
    """
    if _recurs is None: _recurs = [uri]
    fulluri = makeFullURI(uri)
    try:
        data = _gatekeeper.get(fulluri, check=False)
    except:
        return []
    # is this already a feed?
    if couldBeFeedData(data):
        return [fulluri]
    newuri = tryBrokenRedirect(data)
    if newuri and newuri not in _recurs:
        _recurs.append(newuri)
        return feeds(newuri, all=all, querySyndic8=querySyndic8, _recurs=_recurs)
    # nope, it's a page, try LINK tags first
    _debuglog('looking for LINK tags')
    try:
        outfeeds = getLinks(data, fulluri)
    except:
        outfeeds = []
    if not outfeeds:
        # sgmllib found nothing; retry with the more tolerant lxml parser.
        _debuglog('using lxml to look for LINK tags')
        try:
            outfeeds = getLinksLXML(data, fulluri)
        except:
            outfeeds = []
    _debuglog('found %s feeds through LINK tags' % len(outfeeds))
    # Every candidate is verified by actually fetching it (isFeed).
    outfeeds = filter(isFeed, outfeeds)
    if all or not outfeeds:
        # no LINK tags, look for regular <A> links that point to feeds
        _debuglog('no LINK tags, looking at A tags')
        try:
            links = getALinks(data, fulluri)
        except:
            links = []
        _debuglog('no LINK tags, looking at local links')
        locallinks = getLocalLinks(links, fulluri)
        # look for obvious feed links on the same server
        outfeeds.extend(filter(isFeed, filter(isFeedLink, locallinks)))
        if all or not outfeeds:
            # look harder for feed links on the same server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, locallinks)))
        if all or not outfeeds:
            # look for obvious feed links on another server
            outfeeds.extend(filter(isFeed, filter(isFeedLink, links)))
        if all or not outfeeds:
            # look harder for feed links on another server
            outfeeds.extend(filter(isFeed, filter(isXMLRelatedLink, links)))
    if all or not outfeeds:
        _debuglog('no A tags, guessing')
        suffixes = [ # filenames used by popular software:
            'feed/', # obvious
            'atom.xml', # blogger, TypePad
            'index.atom', # MT, apparently
            'index.rdf', # MT
            'rss.xml', # Dave Winer/Manila
            'index.xml', # MT
            'index.rss' # Slash
        ]
        outfeeds.extend(filter(isFeed, [urlparse.urljoin(fulluri, x) for x in suffixes]))
    if (all or not outfeeds) and querySyndic8:
        # still no luck, search Syndic8 for feeds (requires xmlrpclib)
        _debuglog('still no luck, searching Syndic8')
        outfeeds.extend(getFeedsFromSyndic8(uri))
    # De-duplicate when a set type is available (always true on 2.4+).
    if hasattr(__builtins__, 'set') or __builtins__.has_key('set'):
        outfeeds = list(set(outfeeds))
    return outfeeds
getFeeds = feeds # backwards-compatibility
def feed(uri):
    """Return a single "best" feed URL for *uri*, or None if none is found.

    Feeds whose URL looks like a per-post comments feed are deprioritized.
    """
    # TODO: give preference to certain feed formats
    candidates = feeds(uri)
    if not candidates:
        return None
    non_comment = [f for f in candidates if 'comments' not in f.lower()]
    if non_comment:
        return non_comment[0]
    return candidates[0]
##### test harness ######
def test():
    """Crawl the autodiscovery test suite and report failures.

    Network-dependent harness: walks the chained test pages, checking that
    exactly one feed link is discovered per page and that each discovered
    feed links back to the page it was found on.
    """
    uri = 'http://diveintomark.org/tests/client/autodiscovery/html4-001.html'
    failed = []
    count = 0
    while 1:
        data = _gatekeeper.get(uri)
        # Stop once the fetched page is no longer part of the test suite.
        if data.find('Atom autodiscovery test') == -1: break
        sys.stdout.write('.')
        sys.stdout.flush()
        count += 1
        links = getLinks(data, uri)
        if not links:
            print '\n*** FAILED ***', uri, 'could not find link'
            failed.append(uri)
        elif len(links) > 1:
            print '\n*** FAILED ***', uri, 'found too many links'
            failed.append(uri)
        else:
            atomdata = urllib.urlopen(links[0]).read()
            if atomdata.find('<link rel="alternate"') == -1:
                print '\n*** FAILED ***', uri, 'retrieved something that is not a feed'
                failed.append(uri)
            else:
                # The feed must contain an alternate link pointing back at uri.
                backlink = atomdata.split('href="').pop().split('"')[0]
                if backlink != uri:
                    print '\n*** FAILED ***', uri, 'retrieved wrong feed'
                    failed.append(uri)
        # Follow the suite's rel="next" pointer; stop at the last page.
        if data.find('<link rel="next" href="') == -1: break
        uri = urlparse.urljoin(uri, data.split('<link rel="next" href="').pop().split('"')[0])
    print
    print count, 'tests executed,', len(failed), 'failed'
if __name__ == '__main__':
    # Command line: [--debug] [uri | 'test']
    args = sys.argv[1:]
    if args and args[0] == '--debug':
        _debug = 1
        args.pop(0)
    if args:
        uri = args[0]
    else:
        uri = 'http://diveintomark.org/'
    if uri == 'test':
        # Run the self-test harness instead of discovering feeds.
        test()
    else:
        print "\n".join(getFeeds(uri))
| mit |
poo12138/gem5-stable | src/arch/x86/isa/insts/x87/stack_management/__init__.py | 91 | 2317 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Submodules whose x87 stack-management microcode is concatenated below.
categories = ["stack_control",
              "clear_state"]

microcode = '''
# X86 microcode
'''

for category in categories:
    # Use the function-call form of exec: identical semantics under
    # Python 2, but also valid syntax under Python 3, unlike the bare
    # "exec <string>" statement form used previously.
    exec("import %s as cat" % category)
    microcode += cat.microcode
| bsd-3-clause |
nZac/flask | flask/globals.py | 322 | 1645 | # -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
# Message for the RuntimeError raised when request-bound proxies
# (request, session) are used with no request context pushed.
_request_ctx_err_msg = '''\
Working outside of request context.
This typically means that you attempted to use functionality that needed
an active HTTP request. Consult the documentation on testing for
information about how to avoid this problem.\
'''

# Message for the RuntimeError raised when application-bound proxies
# (current_app, g) are used with no application context pushed.
_app_ctx_err_msg = '''\
Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in a way. To solve
this set up an application context with app.app_context(). See the
documentation for more information.\
'''
def _lookup_req_object(name):
    """Fetch attribute *name* from the topmost request context.

    Raises RuntimeError when no request context has been pushed.
    """
    ctx = _request_ctx_stack.top
    if ctx is not None:
        return getattr(ctx, name)
    raise RuntimeError(_request_ctx_err_msg)
def _lookup_app_object(name):
    """Fetch attribute *name* from the topmost application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is not None:
        return getattr(ctx, name)
    raise RuntimeError(_app_ctx_err_msg)
def _find_app():
    """Return the application bound to the topmost application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is not None:
        return ctx.app
    raise RuntimeError(_app_ctx_err_msg)
# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
# Module-level proxies: each one resolves against the topmost context at
# every access, so they are safe to import at module load time.
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
| bsd-3-clause |
amarian12/p2pool-adaptive-nvc | p2pool/bitcoin/script.py | 282 | 2589 | from p2pool.util import math, pack
def reads_nothing(f):
    """Argument reader for opcodes that carry no payload: (None, stream)."""
    return (None, f)
def protoPUSH(length):
    """Build a reader that consumes exactly *length* literal bytes."""
    def read_fixed(f):
        return pack.read(f, length)
    return read_fixed
def protoPUSHDATA(size_len):
    """Build a reader for PUSHDATA opcodes with a *size_len*-byte length prefix."""
    def read_pushdata(f):
        # Length prefix is little-endian; strip trailing NULs after reversal.
        size_str, f = pack.read(f, size_len)
        size = math.string_to_natural(size_str[::-1].lstrip(chr(0)))
        payload, f = pack.read(f, size)
        return payload, f
    return read_pushdata
# Opcode dispatch table: opcode byte -> (mnemonic, argument reader).
opcodes = {}
# Default every byte to an unknown opcode with no argument.
for i in xrange(256):
    opcodes[i] = 'UNK_' + str(i), reads_nothing

opcodes[0] = 'PUSH', lambda f: ('', f)  # OP_0 pushes the empty string
# 0x01-0x4b: push that many literal bytes.
for i in xrange(1, 76):
    opcodes[i] = 'PUSH', protoPUSH(i)
opcodes[76] = 'PUSH', protoPUSHDATA(1)  # OP_PUSHDATA1
opcodes[77] = 'PUSH', protoPUSHDATA(2)  # OP_PUSHDATA2
opcodes[78] = 'PUSH', protoPUSHDATA(4)  # OP_PUSHDATA4
opcodes[79] = 'PUSH', lambda f: ('\x81', f)  # OP_1NEGATE
# 0x51-0x60 (OP_1..OP_16): push the small number (opcode - 80) as one byte.
for i in xrange(81, 97):
    opcodes[i] = 'PUSH', lambda f, _i=i: (chr(_i - 80), f)

opcodes[172] = 'CHECKSIG', reads_nothing
opcodes[173] = 'CHECKSIGVERIFY', reads_nothing
opcodes[174] = 'CHECKMULTISIG', reads_nothing
opcodes[175] = 'CHECKMULTISIGVERIFY', reads_nothing
def parse(script):
    """Iterate over *script*, yielding (opcode_name, opcode_arg) pairs.

    *script* is a raw byte string; reading goes through the pack module's
    (string, offset) stream abstraction.
    """
    f = script, 0
    while pack.size(f):
        opcode_str, f = pack.read(f, 1)
        opcode = ord(opcode_str)
        opcode_name, read_func = opcodes[opcode]
        opcode_arg, f = read_func(f)
        yield opcode_name, opcode_arg
def get_sigop_count(script):
    """Count signature operations in *script*.

    CHECKSIG variants count 1 each; CHECKMULTISIG variants count the
    worst-case 20, matching Bitcoin's sigop accounting.
    """
    total = 0
    for name, _arg in parse(script):
        if name in ('CHECKSIG', 'CHECKSIGVERIFY'):
            total += 1
        elif name in ('CHECKMULTISIG', 'CHECKMULTISIGVERIFY'):
            total += 20
    return total
def create_push_script(datums): # datums can be ints or strs
    """Build a script that pushes each datum in *datums* onto the stack.

    Integers equal to -1 or in [1, 16] use the dedicated small-integer
    opcodes; other integers are serialized as minimal little-endian
    sign-and-magnitude strings.  Payloads are length-prefixed with a
    direct push, OP_PUSHDATA1, OP_PUSHDATA2 or OP_PUSHDATA4 depending
    on their size.
    """
    res = []
    for datum in datums:
        if isinstance(datum, (int, long)):
            if datum == -1 or 1 <= datum <= 16:
                # OP_1NEGATE / OP_1..OP_16 encode these values directly.
                res.append(chr(datum + 80))
                continue
            negative = datum < 0
            datum = math.natural_to_string(abs(datum))
            if datum and ord(datum[0]) & 128:
                # A set high bit would be read as a sign bit; pad with 0x00.
                datum = '\x00' + datum
            if negative:
                datum = chr(ord(datum[0]) + 128) + datum[1:]
            datum = datum[::-1]  # script numbers are little-endian
        if len(datum) < 76:
            res.append(chr(len(datum)))
        elif len(datum) <= 0xff:
            # BUG FIX: the opcode bytes used to be appended as plain ints
            # (76/77/78), which made the final ''.join() raise TypeError;
            # they must be single-byte strings.
            res.append(chr(76))  # OP_PUSHDATA1
            res.append(chr(len(datum)))
        elif len(datum) <= 0xffff:
            res.append(chr(77))  # OP_PUSHDATA2
            res.append(pack.IntType(16).pack(len(datum)))
        elif len(datum) <= 0xffffffff:
            res.append(chr(78))  # OP_PUSHDATA4
            res.append(pack.IntType(32).pack(len(datum)))
        else:
            raise ValueError('string too long')
        res.append(datum)
    return ''.join(res)
| gpl-3.0 |
Serag8/Bachelor | google_appengine/lib/django-1.5/django/contrib/gis/gdal/base.py | 224 | 1155 | from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
    """
    Base object for GDAL objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # Initially the pointer is NULL.
    _ptr = None

    # Default allowed pointer type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Refuse to hand out a NULL pointer: passing NULL into GDAL
        # routines would be very bad.
        if not self._ptr:
            raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept raw integers (wrapped into ptr_type), ready-made pointers
        # of the compatible type, or None (NULL); reject anything else.
        if isinstance(ptr, six.integer_types):
            self._ptr = self.ptr_type(ptr)
        elif ptr is None or isinstance(ptr, self.ptr_type):
            self._ptr = ptr
        else:
            raise TypeError('Incompatible pointer type')

    # Pointer access property.
    ptr = property(_get_ptr, _set_ptr)
| mit |
zarboz/m8wlv | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# Map of pid -> command name, seeded with the swapper/idle task.
threads = { 0 : "idle"}

def thread_name(pid):
    # Human-readable "comm:pid" label for display.
    return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder event used when nothing noteworthy happened on a runqueue."""

    @staticmethod
    def color():
        # No dedicated highlight colour.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Event: a task left the runqueue by going to sleep."""

    @staticmethod
    def color():
        # Blue marker in the GUI.
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "{0} gone to sleep".format(thread_name(self.sleeper))
class RunqueueEventWakeup:
    """Event: a task woke up on this runqueue."""

    @staticmethod
    def color():
        # Yellow marker in the GUI.
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "{0} woke up".format(thread_name(self.wakee))
class RunqueueEventFork:
    """Event: a freshly forked task appeared on this runqueue."""

    @staticmethod
    def color():
        # Green marker in the GUI.
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task {0}".format(thread_name(self.child))
class RunqueueMigrateIn:
    """Event: a task migrated onto this runqueue from another CPU."""

    @staticmethod
    def color():
        # Cyan marker in the GUI.
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in {0}".format(thread_name(self.new))
class RunqueueMigrateOut:
    """Event: a task migrated away from this runqueue to another CPU."""

    @staticmethod
    def color():
        # Magenta marker in the GUI.
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out {0}".format(thread_name(self.old))
class RunqueueSnapshot:
    """Immutable snapshot of one CPU's runqueue plus the event that created it."""

    def __init__(self, tasks=(0,), event=None):
        # BUG FIX: the defaults used to be a mutable list ([0]) and a single
        # RunqueueEventUnknown() instance shared by every call site; use an
        # immutable tuple and build the default event lazily instead.
        self.tasks = tuple(tasks)
        self.event = RunqueueEventUnknown() if event is None else event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot resulting from a context switch prev -> next."""
        event = RunqueueEventUnknown()

        # Nothing to do: a still-runnable task switched to a task that is
        # already known to be on this runqueue.
        if taskState(prev_state) == "R" and next in self.tasks \
                and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after task *old* migrated to another CPU."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Task already present: just record the event on this snapshot.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return the snapshot after task *new* migrated onto this CPU."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return the snapshot after task *new* woke up on this CPU."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return the snapshot after freshly forked task *new* appeared."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # BUG FIX: this used to call self.origin_tostring(), a method that
        # is not defined anywhere in this file, so repr() always raised
        # AttributeError.  Show the tasks plus the originating event.
        return "%s %s" % (repr(self.tasks), repr(self.event))
class TimeSlice:
    """State of all runqueues over one interval [start, end) of the trace.

    Slices form a chain through *prev*; each new slice starts from a copy
    of the previous slice's runqueues and aggregate load.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load in sync when one runqueue is replaced.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        # Identity means the switch changed nothing; do not record a slice.
        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source cpu if the task was actually removed there.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and open its successor.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlices plus lookup/painting helpers for the wx GUI.

    NOTE: this file targets Python 2; the integer divisions below rely on
    py2 floor-division semantics for ints.
    """
    def __init__(self, arg=None):
        # BUG FIX: the default used to be the mutable literal [], which is
        # created once at definition time and then mutated through
        # ts_list.append(self) -- so every default-constructed instance
        # shared (and kept growing) the same underlying list.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Return the open slice ending at time ts, creating the chain head if empty."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing time ts (-1 if none)."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        # GUI callback: show a summary of the runqueue under the cursor.
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        # Higher relative load -> redder rectangle.
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Paint every slice/cpu rectangle intersecting [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        # (first start, last end) of the recorded trace, or (0, 0) if empty.
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu id seen in the final slice.
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Dispatches decoded perf trace events into the TimeSliceList."""
    def __init__(self):
        # Last pid known to be running on each cpu (-1 = not seen yet).
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Remember the command names for display and track the running pid.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        # Ignore failed wakeups.
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    # Called by perf once before the first event: set up the dispatcher.
    global parser
    parser = SchedEventProxy()

def trace_end():
    # Called by perf after the last event: hand the collected slices to
    # the wx GUI for display.
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The handlers below are required by perf's generated-script interface but
# carry no information this visualization uses; they deliberately do nothing.

def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    # Forward task migrations to the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    # Forward context switches to the proxy.
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # Wakeup of a freshly forked task (fork flag = 1).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    # Ordinary wakeup (fork flag = 0).
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

# Remaining hooks are part of the generated interface but unused here.

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Events with no dedicated handler are silently ignored.
    pass
| gpl-2.0 |
BenHenning/oppia | core/domain/rte_component_registry_test.py | 30 | 7159 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
import os
import re
import string
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
# File names ending in any of these suffixes will be ignored when checking for
# RTE component validity.
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']
class RteComponentUnitTests(test_utils.GenericTestBase):
    """Tests that all the default RTE components are valid."""

    def _is_camel_cased(self, name):
        """Check whether a name is in CamelCase."""
        return name and (name[0] in string.ascii_uppercase)

    def _is_alphanumeric_string(self, string):
        """Check whether a string is alphanumeric."""
        # NOTE(review): the parameter shadows the imported `string` module;
        # harmless here since the module is not used in this method, but
        # worth renaming if the method ever grows.
        return bool(re.compile("^[a-zA-Z0-9_]+$").match(string))

    def _validate_customization_arg_specs(self, customization_arg_specs):
        # Each customization arg spec must carry exactly these four keys,
        # with a valid schema and a default value that normalizes cleanly.
        for ca_spec in customization_arg_specs:
            self.assertEqual(set(ca_spec.keys()), set([
                'name', 'description', 'schema', 'default_value']))

            self.assertTrue(isinstance(ca_spec['name'], basestring))
            self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
            self.assertTrue(isinstance(ca_spec['description'], basestring))
            self.assertGreater(len(ca_spec['description']), 0)

            schema_utils_test.validate_schema(ca_spec['schema'])
            self.assertEqual(
                ca_spec['default_value'],
                schema_utils.normalize_against_schema(
                    ca_spec['default_value'], ca_spec['schema']))

            if ca_spec['schema']['type'] == 'custom':
                # Custom schemas must map to an editable object class whose
                # normalization accepts the declared default value.
                obj_class = obj_services.Registry.get_object_class_by_type(
                    ca_spec['schema']['obj_type'])
                self.assertIsNotNone(obj_class.edit_html_filename)
                self.assertIsNotNone(obj_class.edit_js_filename)
                self.assertEqual(
                    ca_spec['default_value'],
                    obj_class.normalize(ca_spec['default_value']))

    def _listdir_omit_ignored(self, dir):
        """List all files and directories within 'dir', omitting the ones whose
        name ends in one of the IGNORED_FILE_SUFFIXES."""
        names = os.listdir(dir)
        for suffix in IGNORED_FILE_SUFFIXES:
            names = [name for name in names if not name.endswith(suffix)]
        return names

    def test_allowed_rich_text_components_and_counts(self):
        """Do sanity checks on the ALLOWED_RTE_EXTENSIONS dict in feconf.py."""
        self.assertEqual(
            len(rte_component_registry.Registry.get_all_rte_components()),
            len(feconf.ALLOWED_RTE_EXTENSIONS))

        for (component_name, component_definition) in (
                feconf.ALLOWED_RTE_EXTENSIONS.iteritems()):
            contents = os.listdir(
                os.path.join(os.getcwd(), component_definition['dir']))
            self.assertIn('%s.py' % component_name, contents)

    def test_image_data_urls_for_rte_components(self):
        """Test the data urls for the RTE component icons."""
        component_list = rte_component_registry.Registry._rte_components
        for (cpt_name, cpt_spec) in feconf.ALLOWED_RTE_EXTENSIONS.iteritems():
            image_filepath = os.path.join(
                os.getcwd(), cpt_spec['dir'], '%s.png' % cpt_name)
            self.assertEqual(
                utils.convert_png_to_data_url(image_filepath),
                component_list[cpt_name].icon_data_url)

    def test_default_rte_components_are_valid(self):
        """Test that the default RTE components are valid."""
        # Expected top-level attributes on every component class.
        _COMPONENT_CONFIG_SCHEMA = [
            ('name', basestring), ('category', basestring),
            ('description', basestring), ('_customization_arg_specs', list)]

        for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
            # Check that the component id is valid.
            self.assertTrue(self._is_camel_cased(component_id))

            # Check that the component directory exists.
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            self.assertTrue(os.path.isdir(component_dir))

            # In this directory there should be a config .py file, an
            # html file, a JS file, a .png file and a protractor.js file.
            dir_contents = self._listdir_omit_ignored(component_dir)
            self.assertLessEqual(len(dir_contents), 5)

            py_file = os.path.join(component_dir, '%s.py' % component_id)
            html_file = os.path.join(component_dir, '%s.html' % component_id)
            js_file = os.path.join(component_dir, '%s.js' % component_id)
            png_file = os.path.join(component_dir, '%s.png' % component_id)
            protractor_file = os.path.join(component_dir, 'protractor.js')
            self.assertTrue(os.path.isfile(py_file))
            self.assertTrue(os.path.isfile(html_file))
            self.assertTrue(os.path.isfile(js_file))
            self.assertTrue(os.path.isfile(png_file))
            self.assertTrue(os.path.isfile(protractor_file))

            js_file_content = utils.get_file_contents(js_file)
            html_file_content = utils.get_file_contents(html_file)
            self.assertIn(
                'oppiaNoninteractive%s' % component_id, js_file_content)
            self.assertIn(
                '<script type="text/ng-template" '
                'id="richTextComponent/%s"' % component_id,
                html_file_content)
            self.assertNotIn('<script>', js_file_content)
            self.assertNotIn('</script>', js_file_content)

            component = rte_component_registry.Registry._rte_components[
                component_id]

            # Check that the specified component id is the same as the class
            # name.
            self.assertTrue(component_id, component.__class__.__name__)

            # Check that the configuration file contains the correct
            # top-level keys, and that these keys have the correct types.
            for item, item_type in _COMPONENT_CONFIG_SCHEMA:
                self.assertTrue(isinstance(
                    getattr(component, item), item_type))
                # The string attributes should be non-empty.
                if item_type == basestring:
                    self.assertTrue(getattr(component, item))

            self._validate_customization_arg_specs(
                component._customization_arg_specs)
| apache-2.0 |
pferreir/indico-plugins | piwik/indico_piwik/controllers.py | 2 | 3059 | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import jsonify
from indico.core.config import Config
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase
from indico_piwik.views import WPStatistics
from indico_piwik.reports import (ReportCountries, ReportDevices, ReportDownloads, ReportGeneral, ReportMaterial,
ReportVisitsPerDay)
class RHStatistics(RHConferenceModifBase):
    """Serves the event statistics page with a pre-fetched general report."""

    def _checkParams(self, params):
        RHConferenceModifBase._checkParams(self, params)
        self._params = params
        self._params['loading_gif'] = '{}/images/loading.gif'.format(Config.getInstance().getBaseURL())
        # Pre-compute the general report so the template renders synchronously.
        self._params['report'] = ReportGeneral.get(event_id=params.get('confId'), contrib_id=params.get('contrib_id'),
                                                   start_date=params.get('start_date'), end_date=params.get('end_date'))

    def _process(self):
        return WPStatistics.render_template('statistics.html', self._conf, **self._params)
class RHApiBase(RHConferenceModifBase):
    """Base request handler for the plugin's JSON API endpoints."""

    def _checkParams(self, params):
        RHConferenceModifBase._checkParams(self, params)
        # Date window shared by every report endpoint.
        self._report_params = dict(
            start_date=params.get('start_date'),
            end_date=params.get('end_date'))
class RHApiEventBase(RHApiBase):
    """API base for endpoints scoped to one event (and optional contribution)."""

    def _checkParams(self, params):
        RHApiBase._checkParams(self, params)
        # confId is mandatory; contrib_id may be absent (whole-event reports).
        self._report_params['event_id'] = params['confId']
        self._report_params['contrib_id'] = params.get('contrib_id')
class RHApiDownloads(RHApiEventBase):
    """Returns download statistics for a given material URL."""

    def _checkParams(self, params):
        RHApiEventBase._checkParams(self, params)
        # download_url is mandatory for this endpoint.
        self._report_params['download_url'] = params['download_url']

    def _process(self):
        return jsonify(ReportDownloads.get(**self._report_params))
class RHApiEventVisitsPerDay(RHApiEventBase):
    """Returns per-day visit counts for the event."""

    def _process(self):
        return jsonify(ReportVisitsPerDay.get(**self._report_params))

class RHApiEventGraphCountries(RHApiEventBase):
    """Returns the visitors-by-country graph data for the event."""

    def _process(self):
        return jsonify(ReportCountries.get(**self._report_params))

class RHApiEventGraphDevices(RHApiEventBase):
    """Returns the visitors-by-device graph data for the event."""

    def _process(self):
        return jsonify(ReportDevices.get(**self._report_params))

class RHApiMaterial(RHApiEventBase):
    """Returns material statistics for the event."""

    def _process(self):
        return jsonify(ReportMaterial.get(**self._report_params))
| gpl-3.0 |
xerosanyam/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_add.py | 61 | 1089 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups_participants import ParticipantsGroupsIqProtocolEntity
class AddParticipantsIqProtocolEntity(ParticipantsGroupsIqProtocolEntity):
    '''
    <iq type="set" id="{{id}}" xmlns="w:g2", to="{{group_jid}}">
        <add>
            <participant jid="{{jid}}"></participant>
            <participant jid="{{jid}}"></participant>
        </add>
    </iq>
    '''

    def __init__(self, group_jid, participantList, _id=None):
        super(AddParticipantsIqProtocolEntity, self).__init__(
            group_jid, participantList, "add", _id=_id)

    @staticmethod
    def fromProtocolTreeNode(node):
        """Rebuild an add-participants entity from its protocol tree node."""
        entity = super(AddParticipantsIqProtocolEntity,
                       AddParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
        entity.__class__ = AddParticipantsIqProtocolEntity
        # Collect the jid of every <participant> child of the <add> node.
        jids = [child["jid"] for child in node.getChild("add").getAllChildren()]
        entity.setProps(node.getAttributeValue("to"), jids)
        return entity
| gpl-3.0 |
debsankha/networkx | networkx/algorithms/bipartite/matrix.py | 32 | 6621 | # -*- coding: utf-8 -*-
"""
====================
Biadjacency matrices
====================
"""
# Copyright (C) 2013-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import itertools
from networkx.convert import _prep_create_using
from networkx.convert_matrix import _generate_weighted_edges
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['biadjacency_matrix','from_biadjacency_matrix']
def biadjacency_matrix(G, row_order, column_order=None,
                       dtype=None, weight='weight', format='csr'):
    r"""Return the biadjacency matrix of the bipartite graph G.

    Let `G = (U, V, E)` be a bipartite graph with node sets
    `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
    matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
    if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
    not `None` and matches the name of an edge attribute, its value is
    used instead of 1.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    row_order : list of nodes
        The rows of the matrix are ordered according to the list of nodes.

    column_order : list, optional
        The columns of the matrix are ordered according to the list of nodes.
        If column_order is None, then the ordering of columns is arbitrary.

    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.

    weight : string or None, optional (default='weight')
        The edge data key used to provide each value in the matrix.
        If None, then each edge has weight 1.

    format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
        The type of the matrix to be returned (default 'csr'). For
        some algorithms different implementations of sparse matrices
        can perform better. See [2]_ for details.

    Returns
    -------
    M : SciPy sparse matrix
        Biadjacency matrix representation of the bipartite graph G.

    Notes
    -----
    No attempt is made to check that the input graph is bipartite.

    For directed bipartite graphs only successors are considered as neighbors.
    To obtain an adjacency matrix with ones (or weight values) for both
    predecessors and successors you have to generate two biadjacency matrices
    where the rows of one of them are the columns of the other, and then add
    one to the transpose of the other.

    See Also
    --------
    adjacency_matrix
    from_biadjacency_matrix

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
    .. [2] Scipy Dev. References, "Sparse Matrices",
       http://docs.scipy.org/doc/scipy/reference/sparse.html
    """
    from scipy import sparse
    nlen = len(row_order)
    if nlen == 0:
        raise nx.NetworkXError("row_order is empty list")
    if len(row_order) != len(set(row_order)):
        msg = "Ambiguous ordering: `row_order` contained duplicates."
        raise nx.NetworkXError(msg)
    if column_order is None:
        column_order = list(set(G) - set(row_order))
    mlen = len(column_order)
    if len(column_order) != len(set(column_order)):
        msg = "Ambiguous ordering: `column_order` contained duplicates."
        raise nx.NetworkXError(msg)

    row_index = dict(zip(row_order, itertools.count()))
    col_index = dict(zip(column_order, itertools.count()))

    # BUG FIX: previously a graph that had edges -- but none running between
    # row_order and column_order -- made the zip(*...) unpack raise
    # ValueError because the edge generator was empty.  Materialize the
    # (row, col, weight) triples first and only unzip when non-empty; this
    # also subsumes the old number_of_edges() == 0 special case.
    triples = [(row_index[u], col_index[v], d.get(weight, 1))
               for u, v, d in G.edges_iter(row_order, data=True)
               if u in row_index and v in col_index]
    if triples:
        row, col, data = zip(*triples)
    else:
        row, col, data = [], [], []

    M = sparse.coo_matrix((data, (row, col)),
                          shape=(nlen, mlen), dtype=dtype)
    try:
        return M.asformat(format)
    except AttributeError:
        raise nx.NetworkXError("Unknown sparse matrix format: %s" % format)
def from_biadjacency_matrix(A, create_using=None, edge_attribute='weight'):
    r"""Build a bipartite graph from a SciPy sparse biadjacency matrix.

    Row indices of `A` become nodes ``0..n-1`` (attribute ``bipartite=0``)
    and column indices become nodes ``n..n+m-1`` (``bipartite=1``).  Each
    matrix entry produces an edge whose numeric value is stored under
    `edge_attribute`; for a multigraph with an integer matrix, an entry of
    ``w`` instead produces ``w`` parallel edges of weight 1 and
    `edge_attribute` is effectively ignored.

    Parameters
    ----------
    A : scipy sparse matrix
        Biadjacency matrix of the graph to build.
    create_using : NetworkX graph, optional
        Graph instance to populate; the default is ``Graph()``.
    edge_attribute : string, optional
        Name of the edge attribute under which the matrix value is stored.

    See Also
    --------
    biadjacency_matrix
    from_numpy_matrix

    References
    ----------
    [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
    """
    G = _prep_create_using(create_using)
    n_rows, n_cols = A.shape
    # Register every node up front so isolated rows/columns are not lost.
    G.add_nodes_from(range(n_rows), bipartite=0)
    G.add_nodes_from(range(n_rows, n_rows + n_cols), bipartite=1)
    # Column node ``c`` lives at offset ``n_rows + c`` in the combined graph.
    weighted_edges = ((r, n_rows + c, w)
                      for (r, c, w) in _generate_weighted_edges(A))
    if G.is_multigraph() and A.dtype.kind in ('i', 'u'):
        # Integer entries in a multigraph are interpreted as edge
        # multiplicities: expand an entry of w into w unit-weight edges.
        weighted_edges = itertools.chain.from_iterable(
            ((r, c, 1) for _ in range(w)) for (r, c, w) in weighted_edges)
    G.add_weighted_edges_from(weighted_edges, weight=edge_attribute)
    return G
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when SciPy is unavailable."""
    from nose import SkipTest
    try:
        import scipy
    except ImportError:
        # Catch only ImportError: the previous bare ``except`` would also
        # report unrelated failures (e.g. a broken SciPy build raising
        # something else) as a plain "not available" skip.
        raise SkipTest("SciPy not available")
| bsd-3-clause |
bcorbet/SickRage | lib/simplejson/encoder.py | 91 | 16041 | """Implementation of JSONEncoder
"""
import re
try:
from lib.simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from lib.simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
# Pattern matching every character that must be escaped inside a JSON
# string: C0 control characters, the backslash and the double quote.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Like ESCAPE, but additionally matches anything outside printable ASCII so
# the output can be forced to pure ASCII via \uXXXX escapes.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects bytes >= 0x80, i.e. the presence of UTF-8 multi-byte sequences.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short-form escape sequences for the characters that have one.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Every remaining C0 control character falls back to a \u00XX escape.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
# How floats are rendered; repr keeps enough digits to round-trip.
FLOAT_REPR = repr
def encode_basestring(s):
    """Return *s* rendered as a JSON string literal.

    Control characters, backslashes and double quotes are escaped;
    non-ASCII text is passed through unchanged.
    """
    def _replace(match):
        # Every character ESCAPE can match has a precomputed escape.
        return ESCAPE_DCT[match.group(0)]
    return '"%s"' % ESCAPE.sub(_replace, s)
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # Python 2 semantics: a byte string carrying UTF-8 data is decoded to
    # unicode first so escapes are computed per code point, not per byte.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            # Short-form escape (\n, \", ...) when one exists.
            return ESCAPE_DCT[s]
        except KeyError:
            # No short form: emit a \uXXXX escape (or a surrogate pair for
            # code points beyond the BMP).
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    # str() forces the (pure-ASCII) result back to a byte string.
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C-accelerated escaper when the _speedups extension is available.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str, unicode | string |
    +-------------------+---------------+
    | int, long, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; overridden per-instance by the ``separators``
    # constructor argument.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level. An indent level of 0 will only insert newlines.
        None is the most compact representation.
        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Instance attribute shadows the ``default`` method defined below.
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                # Python 2: decode byte strings in non-UTF-8 encodings
                # before escaping (UTF-8 is handled inside the escapers).
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # ``markers`` maps id(obj) -> obj for containers currently being
        # encoded; used for circular-reference detection when enabled.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the escaper so byte strings are first decoded with the
            # configured (non-UTF-8) encoding.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials. Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # Fast path: the C one-shot encoder is only usable when neither
        # pretty-printing nor key sorting is requested.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Returns a pure-Python ``_iterencode(o, indent_level)`` generator.

    Used when the C speedups are unavailable or when indentation / sorted
    keys are requested.  The keyword parameters above deliberately rebind
    builtins as locals for faster lookup (Python 2 only: ``False=False``
    is a syntax error on Python 3).
    """
    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        # ``buf`` accumulates the text that must precede the next item: the
        # opening bracket (plus indent) first, then the separator.
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Container or custom object: yield the pending prefix, then
                # delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            # Coerce non-string keys to their JSON string form.  The
            # ``is True``/``is False`` tests come before the int test
            # because bools are ints in Python.
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            # Unknown type: ask _default() for a serializable replacement,
            # tracking the original object for cycle detection.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| gpl-3.0 |
ichu501/WinObjC | deps/3rdparty/icu/icu/source/test/depstest/depstest.py | 189 | 7263 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014, International Business Machines
# Corporation and others. All Rights Reserved.
#
# file name: depstest.py
#
# created on: 2011may24
"""ICU dependency tester.
This probably works only on Linux.
The exit code is 0 if everything is fine, 1 for errors, 2 for only warnings.
Sample invocation:
~/svn.icu/trunk/src/source/test/depstest$ ./depstest.py ~/svn.icu/trunk/dbg
"""
__author__ = "Markus W. Scherer"
import glob
import os.path
import subprocess
import sys
import dependencies
# Symbols excluded from dependency analysis (compiler/runtime internals).
_ignored_symbols = set()
# Maps "library/object.o" -> {"imports": set(...), "exports": set(...)}.
_obj_files = {}
# Maps an exported (or system) symbol to the file/item that provides it.
_symbols_to_files = {}
# Process exit code: 0 = OK, 1 = errors, 2 = warnings only.
_return_value = 0
# Classes with vtables (and thus virtual methods).
_virtual_classes = set()
# Classes with weakly defined destructors.
# nm shows a symbol class of "W" rather than "T".
_weak_destructors = set()
def _ReadObjFile(root_path, library_name, obj_name):
global _ignored_symbols, _obj_files, _symbols_to_files
global _virtual_classes, _weak_destructors
lib_obj_name = library_name + "/" + obj_name
if lib_obj_name in _obj_files:
print "Warning: duplicate .o file " + lib_obj_name
_return_value = 2
return
path = os.path.join(root_path, library_name, obj_name)
nm_result = subprocess.Popen(["nm", "--demangle", "--format=sysv",
"--extern-only", "--no-sort", path],
stdout=subprocess.PIPE).communicate()[0]
obj_imports = set()
obj_exports = set()
for line in nm_result.splitlines():
fields = line.split("|")
if len(fields) == 1: continue
name = fields[0].strip()
# Ignore symbols like '__cxa_pure_virtual',
# 'vtable for __cxxabiv1::__si_class_type_info' or
# 'DW.ref.__gxx_personality_v0'.
if name.startswith("__cxa") or "__cxxabi" in name or "__gxx" in name:
_ignored_symbols.add(name)
continue
type = fields[2].strip()
if type == "U":
obj_imports.add(name)
else:
obj_exports.add(name)
_symbols_to_files[name] = lib_obj_name
# Is this a vtable? E.g., "vtable for icu_49::ByteSink".
if name.startswith("vtable for icu"):
_virtual_classes.add(name[name.index("::") + 2:])
# Is this a destructor? E.g., "icu_49::ByteSink::~ByteSink()".
index = name.find("::~")
if index >= 0 and type == "W":
_weak_destructors.add(name[index + 3:name.index("(", index)])
_obj_files[lib_obj_name] = {"imports": obj_imports, "exports": obj_exports}
def _ReadLibrary(root_path, library_name):
  """Feeds every .o file under root_path/library_name to _ReadObjFile."""
  pattern = os.path.join(root_path, library_name, "*.o")
  for obj_path in glob.glob(pattern):
    _ReadObjFile(root_path, library_name, os.path.basename(obj_path))
def _Resolve(name, parents):
  """Recursively computes the imports/exports of dependency item ``name``.

  ``parents`` is the chain of items currently being resolved; it is used to
  detect dependency cycles.  Returns the item dict, with its computed
  "exports" and "system_symbols" cached in place.
  """
  global _ignored_symbols, _obj_files, _symbols_to_files, _return_value
  item = dependencies.items[name]
  item_type = item["type"]
  if name in parents:
    sys.exit("Error: %s %s has a circular dependency on itself: %s" %
             (item_type, name, parents))
  # Check if already cached.
  exports = item.get("exports")
  if exports != None: return item
  # Calculate recursively.
  parents.append(name)
  imports = set()
  exports = set()
  system_symbols = item.get("system_symbols")
  if system_symbols == None: system_symbols = item["system_symbols"] = set()
  files = item.get("files")
  if files:
    for file_name in files:
      obj_file = _obj_files[file_name]
      imports |= obj_file["imports"]
      exports |= obj_file["exports"]
  # Symbols satisfied within this item itself (or deliberately ignored) are
  # not imports of the item as a whole.
  imports -= exports | _ignored_symbols
  deps = item.get("deps")
  if deps:
    for dep in deps:
      dep_item = _Resolve(dep, parents)
      # Detect whether this item needs to depend on dep,
      # except when this item has no files, that is, when it is just
      # a deliberate umbrella group or library.
      dep_exports = dep_item["exports"]
      dep_system_symbols = dep_item["system_symbols"]
      if files and imports.isdisjoint(dep_exports) and imports.isdisjoint(dep_system_symbols):
        print "Info: %s %s does not need to depend on %s\n" % (item_type, name, dep)
      # We always include the dependency's exports, even if we do not need them
      # to satisfy local imports.
      exports |= dep_exports
      system_symbols |= dep_system_symbols
  item["exports"] = exports
  item["system_symbols"] = system_symbols
  imports -= exports | system_symbols
  # Anything still unresolved is an error: report, per importing file, which
  # item should have provided the symbol.
  for symbol in imports:
    for file_name in files:
      if symbol in _obj_files[file_name]["imports"]:
        neededFile = _symbols_to_files.get(symbol)
        if neededFile in dependencies.file_to_item:
          neededItem = "but %s does not depend on %s (for %s)" % (name, dependencies.file_to_item[neededFile], neededFile)
        else:
          neededItem = "- is this a new system symbol?"
        sys.stderr.write("Error: in %s %s: %s imports %s %s\n" %
                         (item_type, name, file_name, symbol, neededItem))
        _return_value = 1
  del parents[-1]
  return item
def Process(root_path):
  """Loads dependencies.txt, reads the libraries' .o files, and processes them.
  Modifies dependencies.items: Recursively builds each item's system_symbols and exports.
  """
  global _ignored_symbols, _obj_files, _return_value
  global _virtual_classes, _weak_destructors
  dependencies.Load()
  # Pre-register declared system symbols so unresolved imports of them can
  # be attributed to their declaring item.
  for name_and_item in dependencies.items.iteritems():
    name = name_and_item[0]
    item = name_and_item[1]
    system_symbols = item.get("system_symbols")
    if system_symbols:
      for symbol in system_symbols:
        _symbols_to_files[symbol] = name
  for library_name in dependencies.libraries:
    _ReadLibrary(root_path, library_name)
  # The set of built .o files must match dependencies.txt exactly.
  o_files_set = set(_obj_files.keys())
  files_missing_from_deps = o_files_set - dependencies.files
  files_missing_from_build = dependencies.files - o_files_set
  if files_missing_from_deps:
    sys.stderr.write("Error: files missing from dependencies.txt:\n%s\n" %
                     sorted(files_missing_from_deps))
    _return_value = 1
  if files_missing_from_build:
    sys.stderr.write("Error: files in dependencies.txt but not built:\n%s\n" %
                     sorted(files_missing_from_build))
    _return_value = 1
  if not _return_value:
    for library_name in dependencies.libraries:
      _Resolve(library_name, [])
  if not _return_value:
    # A class with virtual methods but only a weak (implicit/inline)
    # destructor is an error; see the referenced ICU ticket.
    virtual_classes_with_weak_destructors = _virtual_classes & _weak_destructors
    if virtual_classes_with_weak_destructors:
      sys.stderr.write("Error: Some classes have virtual methods, and "
                       "an implicit or inline destructor "
                       "(see ICU ticket #8454 for details):\n%s\n" %
                       sorted(virtual_classes_with_weak_destructors))
      _return_value = 1
def main():
global _return_value
if len(sys.argv) <= 1:
sys.exit(("Command line error: " +
"need one argument with the root path to the built ICU libraries/*.o files."))
Process(sys.argv[1])
if _ignored_symbols:
print "Info: ignored symbols:\n%s" % sorted(_ignored_symbols)
if not _return_value:
print "OK: Specified and actual dependencies match."
else:
print "Error: There were errors, please fix them and re-run. Processing may have terminated abnormally."
return _return_value
if __name__ == "__main__":
sys.exit(main())
| mit |
saukrIppl/seahub | thirdpart/Django-1.8.10-py2.7.egg/django/utils/dates.py | 590 | 2296 | "Commonly-used date structures"
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
# Localized full weekday names keyed by number, 0 = Monday ... 6 = Sunday.
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
    5: _('Saturday'), 6: _('Sunday')
}
# Abbreviated weekday names; same keys as WEEKDAYS.
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
    5: _('Sat'), 6: _('Sun')
}
# Reverse lookup: lowercase English weekday name -> weekday number.
WEEKDAYS_REV = {
    'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
    'saturday': 5, 'sunday': 6
}
# Localized full month names keyed by month number (1-12).
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
    7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
    12: _('December')
}
# Lowercase three-letter month abbreviations (1-12).
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
    7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
# Reverse lookup: three-letter English abbreviation -> month number.
MONTHS_3_REV = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
    'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = { # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
| apache-2.0 |
retr0h/ansible-modules-core | system/user.py | 15 | 52271 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: user
author: Stephen Fromm
version_added: "0.2"
short_description: Manage user accounts
requirements: [ useradd, userdel, usermod ]
description:
- Manage user accounts and user attributes.
options:
name:
required: true
aliases: [ "user" ]
description:
- Name of the user to create, remove or modify.
comment:
required: false
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
required: false
description:
- Optionally sets the I(UID) of the user.
non_unique:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Optionally when used with the -u option, this option allows to
change the user ID to a non-unique value.
version_added: "1.1"
group:
required: false
description:
- Optionally sets the user's primary group (takes a group name).
groups:
required: false
description:
- Puts the user in this comma-delimited list of groups. When set to
the empty string ('groups='), the user is removed from all groups
except the primary group.
append:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
shell:
required: false
description:
- Optionally set the user's shell.
home:
required: false
description:
- Optionally set the user's home directory.
password:
required: false
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. The `FAQ <http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module>`_
contains details on various ways to generate these password values.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the account should exist. When C(absent), removes
the user account.
createhome:
required: false
default: "yes"
choices: [ "yes", "no" ]
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
move_home:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
force:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --force).
login_class:
required: false
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --remove).
generate_ssh_key:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.9"
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
ssh_key_bits:
required: false
default: 2048
version_added: "0.9"
description:
- Optionally specify number of bits in SSH key to create.
ssh_key_type:
required: false
default: rsa
version_added: "0.9"
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
ssh_key_file:
required: false
default: $HOME/.ssh/id_rsa
version_added: "0.9"
description:
- Optionally specify the SSH key filename.
ssh_key_comment:
required: false
default: ansible-generated
version_added: "0.9"
description:
- Optionally define the comment for the SSH key.
ssh_key_passphrase:
required: false
version_added: "0.9"
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "1.3"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
'''
EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
- user: name=johnd comment="John Doe" uid=1040 group=admin
# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
- user: name=james shell=/bin/bash groups=admins,developers append=yes
# Remove the user 'johnd'
- user: name=johnd state=absent remove=yes
# Create a 2048-bit SSH key for user jsmith
- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048
'''
import os
import pwd
import grp
import syslog
import platform
# The shadow password database module is platform-dependent; record its
# availability so password-hash lookups can be skipped when it is missing.
try:
    import spwd
    HAVE_SPWD = True
except ImportError:
    # Bug fix: this was a bare ``except``, which would also silently
    # swallow unrelated errors raised during import.
    HAVE_SPWD = False
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
    # Platform/distribution identifiers; per the class docstring, every
    # subclass must define both (distribution may be None).
    platform = 'Generic'
    distribution = None
    # Path of the shadow password file consulted for password hashes.
    SHADOWFILE = '/etc/shadow'
    def __new__(cls, *args, **kwargs):
        # Dispatch instantiation to the platform-specific User subclass.
        return load_platform_subclass(User, args, kwargs)
    def __init__(self, module):
        """Caches the Ansible module handle and copies its parameters
        onto the instance for convenient access."""
        self.module = module
        self.state = module.params['state']
        self.name = module.params['name']
        self.uid = module.params['uid']
        self.non_unique = module.params['non_unique']
        self.group = module.params['group']
        self.groups = module.params['groups']
        self.comment = module.params['comment']
        self.home = module.params['home']
        self.shell = module.params['shell']
        self.password = module.params['password']
        self.force = module.params['force']
        self.remove = module.params['remove']
        self.createhome = module.params['createhome']
        self.move_home = module.params['move_home']
        self.system = module.params['system']
        self.login_class = module.params['login_class']
        self.append = module.params['append']
        self.sshkeygen = module.params['generate_ssh_key']
        self.ssh_bits = module.params['ssh_key_bits']
        self.ssh_type = module.params['ssh_key_type']
        self.ssh_comment = module.params['ssh_key_comment']
        self.ssh_passphrase = module.params['ssh_key_passphrase']
        self.update_password = module.params['update_password']
        # Default the key file to the relative path .ssh/id_<type> when no
        # explicit path was requested.
        if module.params['ssh_key_file'] is not None:
            self.ssh_file = module.params['ssh_key_file']
        else:
            self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
        # select whether we dump additional debug info through syslog
        self.syslogging = False
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
    """Create the account with useradd (or a drop-in replacement).

    Builds the flag list from the requested attributes and runs it once;
    returns (rc, out, err) from the command.
    """
    cmd = [self.module.get_bin_path(command_name, True)]

    if self.uid is not None:
        cmd.append('-u')
        cmd.append(self.uid)

        # -o (non-unique uid) only makes sense together with -u
        if self.non_unique:
            cmd.append('-o')

    if self.group is not None:
        if not self.group_exists(self.group):
            self.module.fail_json(msg="Group %s does not exist" % self.group)
        cmd.append('-g')
        cmd.append(self.group)
    elif self.group_exists(self.name):
        # use the -N option (no user group) if a group already
        # exists with the same name as the user to prevent
        # errors from useradd trying to create a group when
        # USERGROUPS_ENAB is set in /etc/login.defs.
        cmd.append('-N')

    if self.groups is not None and len(self.groups):
        groups = self.get_groups_set()
        cmd.append('-G')
        cmd.append(','.join(groups))

    if self.comment is not None:
        cmd.append('-c')
        cmd.append(self.comment)

    if self.home is not None:
        cmd.append('-d')
        cmd.append(self.home)

    if self.shell is not None:
        cmd.append('-s')
        cmd.append(self.shell)

    if self.password is not None:
        # -p expects an already-encrypted password string
        cmd.append('-p')
        cmd.append(self.password)

    # explicitly request or suppress home directory creation
    if self.createhome:
        cmd.append('-m')
    else:
        cmd.append('-M')

    if self.system:
        cmd.append('-r')

    cmd.append(self.name)
    return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
usermod_path = self.module.get_bin_path('usermod', True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path]
cmd.append('--help')
rc, data1, data2 = self.execute_command(cmd)
helpout = data1 + data2
# check if --append exists
lines = helpout.split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
    """Change an existing account with usermod.

    Only options whose current value (taken from the passwd/shadow
    databases) differs from the requested one are appended, so the
    command is a no-op (rc None) when nothing needs changing.
    """
    cmd = [self.module.get_bin_path('usermod', True)]
    info = self.user_info()
    has_append = self._check_usermod_append()

    if self.uid is not None and info[2] != int(self.uid):
        cmd.append('-u')
        cmd.append(self.uid)

        if self.non_unique:
            cmd.append('-o')

    if self.group is not None:
        if not self.group_exists(self.group):
            self.module.fail_json(msg="Group %s does not exist" % self.group)
        ginfo = self.group_info(self.group)
        if info[3] != ginfo[2]:
            cmd.append('-g')
            cmd.append(self.group)

    if self.groups is not None:
        current_groups = self.user_group_membership()
        groups_need_mod = False
        groups = []

        if self.groups == '':
            # empty groups string means "remove all supplementary groups"
            if current_groups and not self.append:
                groups_need_mod = True
        else:
            groups = self.get_groups_set(remove_existing=False)
            group_diff = set(current_groups).symmetric_difference(groups)

            if group_diff:
                if self.append:
                    for g in groups:
                        if g in group_diff:
                            if has_append:
                                cmd.append('-a')
                            groups_need_mod = True
                            break
                else:
                    groups_need_mod = True

        if groups_need_mod:
            if self.append and not has_append:
                # fallback for usermod variants without -a: some support
                # -A to add only the missing groups
                cmd.append('-A')
                cmd.append(','.join(group_diff))
            else:
                cmd.append('-G')
                cmd.append(','.join(groups))

    if self.comment is not None and info[4] != self.comment:
        cmd.append('-c')
        cmd.append(self.comment)

    if self.home is not None and info[5] != self.home:
        cmd.append('-d')
        cmd.append(self.home)
        if self.move_home:
            cmd.append('-m')

    if self.shell is not None and info[6] != self.shell:
        cmd.append('-s')
        cmd.append(self.shell)

    if self.update_password == 'always' and self.password is not None and info[1] != self.password:
        cmd.append('-p')
        cmd.append(self.password)

    # skip if no changes to be made
    if len(cmd) == 1:
        return (None, '', '')
    elif self.module.check_mode:
        return (0, '', '')

    cmd.append(self.name)
    return self.execute_command(cmd)
def group_exists(self, group):
    """Return True if *group* (a name, or a numeric gid string) exists."""
    try:
        if group.isdigit():
            entry = grp.getgrgid(int(group))
        else:
            entry = grp.getgrnam(group)
    except KeyError:
        return False
    if entry:
        return True
def group_info(self, group):
    """Return the grp database entry for *group* as a list, or False.

    *group* may be a group name or a numeric gid given as a string.
    Returns False when the group does not exist.
    """
    if not self.group_exists(group):
        return False
    if group.isdigit():
        # BUGFIX: grp.getgrgid() requires an int; the original passed the
        # raw string, raising TypeError for any numeric gid.  group_exists()
        # above already converts with int() — do the same here.
        return list(grp.getgrgid(int(group)))
    return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
    """Parse self.groups (comma-separated names) into a set.

    Fails the module if any listed group does not exist.  When
    *remove_existing* is true, the user's current primary group is
    dropped from the result (it need not appear in -G lists).
    Returns None when no groups option was given.
    """
    if self.groups is None:
        return None
    info = self.user_info()
    # filter(None, ...) drops empty fragments from e.g. "a,,b" or a trailing comma
    groups = set(filter(None, self.groups.split(',')))
    for g in set(groups):
        if not self.group_exists(g):
            self.module.fail_json(msg="Group %s does not exist" % (g))
        # info[3] is the user's primary gid; group_info(g)[2] is g's gid
        if info and remove_existing and self.group_info(g)[2] == info[3]:
            groups.remove(g)
    return groups
def user_group_membership(self):
    """Return the names of the user's supplementary groups.

    Scans the whole group database; the user's primary group
    (matching info[3], the passwd gid field) is excluded.
    """
    groups = []
    info = self.get_pwd_info()
    for group in grp.getgrall():
        if self.name in group.gr_mem and not info[3] == group.gr_gid:
            groups.append(group[0])
    return groups
def user_exists(self):
    """Return True if an account named self.name is in the passwd database."""
    try:
        entry = pwd.getpwnam(self.name)
    except KeyError:
        return False
    if entry:
        return True
def get_pwd_info(self):
    """Return the raw passwd entry for self.name as a list, or False if absent."""
    if not self.user_exists():
        return False
    return list(pwd.getpwnam(self.name))
def user_info(self):
    """Return the passwd entry for self.name with the real password hash.

    When the passwd password field is a shadow placeholder ('x', '*',
    or empty), it is replaced with the hash read via user_password().
    Returns False if the user does not exist.
    """
    if not self.user_exists():
        return False
    info = self.get_pwd_info()
    if len(info[1]) == 1 or len(info[1]) == 0:
        info[1] = self.user_password()
    return info
def user_password(self):
    """Return the user's encrypted password string ('' when unavailable).

    Tries the spwd module first (when available), then falls back to
    parsing SHADOWFILE directly.
    """
    passwd = ''
    if HAVE_SPWD:
        try:
            passwd = spwd.getspnam(self.name)[1]
        except KeyError:
            return passwd
    # NOTE(review): when spwd succeeded above, execution still falls through
    # and re-reads the shadow file below; the result is the same hash.
    if not self.user_exists():
        return passwd
    else:
        # Read shadow file for user's encrypted password string
        if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
            for line in open(self.SHADOWFILE).readlines():
                if line.startswith('%s:' % self.name):
                    passwd = line.split(':')[1]
    return passwd
def get_ssh_key_path(self):
    """Path of the user's SSH private key file.

    A relative self.ssh_file is resolved against the user's home
    directory (field 5 of the passwd entry); an absolute one is
    returned unchanged.
    """
    info = self.user_info()
    if os.path.isabs(self.ssh_file):
        return self.ssh_file
    return os.path.join(info[5], self.ssh_file)
def ssh_key_gen(self):
    """Generate an SSH keypair for the user with ssh-keygen.

    Creates the key's parent directory (mode 0700, owned by the user)
    if needed.  Returns (rc, out, err); rc is None when the key file
    already exists (treated as "no change" by the caller).
    """
    info = self.user_info()
    if not os.path.exists(info[5]):
        return (1, '', 'User %s home directory does not exist' % self.name)
    ssh_key_file = self.get_ssh_key_path()
    ssh_dir = os.path.dirname(ssh_key_file)
    if not os.path.exists(ssh_dir):
        try:
            os.mkdir(ssh_dir, 0700)
            os.chown(ssh_dir, info[2], info[3])
        except OSError, e:
            return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
    if os.path.exists(ssh_key_file):
        return (None, 'Key already exists', '')
    cmd = [self.module.get_bin_path('ssh-keygen', True)]
    cmd.append('-t')
    cmd.append(self.ssh_type)
    cmd.append('-b')
    cmd.append(self.ssh_bits)
    cmd.append('-C')
    cmd.append(self.ssh_comment)
    cmd.append('-f')
    cmd.append(ssh_key_file)
    # -N '' means "no passphrase" when none was requested
    cmd.append('-N')
    if self.ssh_passphrase is not None:
        cmd.append(self.ssh_passphrase)
    else:
        cmd.append('')
    (rc, out, err) = self.execute_command(cmd)
    if rc == 0:
        # If the keys were successfully created, we should be able
        # to tweak ownership.
        os.chown(ssh_key_file, info[2], info[3])
        os.chown('%s.pub' % ssh_key_file, info[2], info[3])
    return (rc, out, err)
def ssh_key_fingerprint(self):
    """Return (rc, out, err) of `ssh-keygen -l -f <keyfile>`."""
    key_path = self.get_ssh_key_path()
    if not os.path.exists(key_path):
        return (1, 'SSH Key file %s does not exist' % key_path, '')
    keygen = self.module.get_bin_path('ssh-keygen', True)
    return self.execute_command([keygen, '-l', '-f', key_path])
def get_ssh_public_key(self):
    """Return the stripped contents of the user's public key file, or None."""
    pub_path = '%s.pub' % self.get_ssh_key_path()
    try:
        pub_file = open(pub_path)
        try:
            return pub_file.read().strip()
        finally:
            pub_file.close()
    except IOError:
        # no public key (or unreadable) -> signal "nothing to report"
        return None
def create_user(self):
    """Platform hook: generic platforms delegate to useradd."""
    # by default we use the create_user_useradd method
    return self.create_user_useradd()
def remove_user(self):
    """Platform hook: generic platforms delegate to userdel."""
    # by default we use the remove_user_userdel method
    return self.remove_user_userdel()
def modify_user(self):
    """Platform hook: generic platforms delegate to usermod."""
    # by default we use the modify_user_usermod method
    return self.modify_user_usermod()
def create_homedir(self, path):
    """Create the user's home directory if missing.

    Copies /etc/skel when it exists, otherwise just makes the
    directories.  Any OSError aborts the module.
    """
    if not os.path.exists(path):
        # use /etc/skel if possible
        if os.path.exists('/etc/skel'):
            try:
                shutil.copytree('/etc/skel', path, symlinks=True)
            except OSError, e:
                # NOTE(review): exit_json(failed=True) is used here instead of
                # fail_json as elsewhere in this module
                self.module.exit_json(failed=True, msg="%s" % e)
        else:
            try:
                os.makedirs(path)
            except OSError, e:
                self.module.exit_json(failed=True, msg="%s" % e)
def chown_homedir(self, uid, gid, path):
    """Recursively chown *path* (the user's home directory) to uid:gid.

    Any OSError aborts the module via exit_json(failed=True).
    """
    try:
        os.chown(path, uid, gid)
        for root, dirs, files in os.walk(path):
            for d in dirs:
                # BUGFIX: the original re-chowned the top-level *path* here
                # instead of the subdirectory being visited, so nested
                # directories were never chowned.
                os.chown(os.path.join(root, d), uid, gid)
            for f in files:
                os.chown(os.path.join(root, f), uid, gid)
    except OSError as e:
        self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
class FreeBsdUser(User):
    """
    This is a FreeBSD User manipulation class - it uses the pw command
    to manipulate the user database, followed by the chpass command
    to change the password.

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'FreeBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def remove_user(self):
        """Delete the account with `pw userdel` (-r also removes the home dir)."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'userdel',
            '-n',
            self.name
        ]
        if self.remove:
            cmd.append('-r')

        return self.execute_command(cmd)

    def create_user(self):
        """Create the account with `pw useradd`, then set the password via chpass."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'useradd',
            '-n',
            self.name,
        ]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.createhome:
            cmd.append('-m')

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        # system cannot be handled currently - should we error if its requested?
        # create the user
        (rc, out, err) = self.execute_command(cmd)
        if rc is not None and rc != 0:
            self.module.fail_json(name=self.name, msg=err, rc=rc)

        # we have to set the password in a second command
        if self.password is not None:
            cmd = [
                self.module.get_bin_path('chpass', True),
                '-p',
                self.password,
                self.name
            ]
            return self.execute_command(cmd)

        return (rc, out, err)

    def modify_user(self):
        """Change the account with `pw usermod`; password changes go via chpass."""
        cmd = [
            self.module.get_bin_path('pw', True),
            'usermod',
            '-n',
            self.name
        ]
        # remember the base length so we can detect whether any option was added
        cmd_len = len(cmd)
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups = self.get_groups_set()

            group_diff = set(current_groups).symmetric_difference(groups)
            groups_need_mod = False

            if group_diff:
                if self.append:
                    for g in groups:
                        if g in group_diff:
                            groups_need_mod = True
                            break
                else:
                    groups_need_mod = True

            if groups_need_mod:
                cmd.append('-G')
                new_groups = groups
                if self.append:
                    # keep current supplementary groups when appending
                    new_groups = groups | set(current_groups)
                cmd.append(','.join(new_groups))

        # modify the user if cmd will do anything
        if cmd_len != len(cmd):
            (rc, out, err) = self.execute_command(cmd)
            if rc is not None and rc != 0:
                self.module.fail_json(name=self.name, msg=err, rc=rc)
        else:
            (rc, out, err) = (None, '', '')

        # we have to set the password in a second command
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd = [
                self.module.get_bin_path('chpass', True),
                '-p',
                self.password,
                self.name
            ]
            return self.execute_command(cmd)

        return (rc, out, err)
# ===========================================
class OpenBSDUser(User):
    """
    This is a OpenBSD User manipulation class.
    Main differences are that OpenBSD:-
     - has no concept of "system" account.
     - has no force delete user

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'OpenBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def create_user(self):
        """Create the account with OpenBSD useradd."""
        cmd = [self.module.get_bin_path('useradd', True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.password is not None:
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')

        cmd.append(self.name)
        return self.execute_command(cmd)

    def remove_user_userdel(self):
        """Delete the account with userdel (OpenBSD has no force option)."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def modify_user(self):
        """Change the account with usermod, only passing what differs."""
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            # -G replaces the group set; -S (below) appends instead
            groups_option = '-G'
            groups = []

            if self.groups == '':
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)

                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                groups_option = '-S'
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                cmd.append(groups_option)
                cmd.append(','.join(groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            # find current login class
            user_login_class = None
            userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
            (rc, out, err) = self.execute_command(userinfo_cmd)

            for line in out.splitlines():
                tokens = line.split()

                if tokens[0] == 'class' and len(tokens) == 2:
                    user_login_class = tokens[1]

            # act only if login_class change
            if self.login_class != user_login_class:
                cmd.append('-L')
                cmd.append(self.login_class)

        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)

        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')
        elif self.module.check_mode:
            return (0, '', '')

        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class NetBSDUser(User):
    """
    This is a NetBSD User manipulation class.
    Main differences are that NetBSD:-
     - has no concept of "system" account.
     - has no force delete user

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'NetBSD'
    distribution = None
    SHADOWFILE = '/etc/master.passwd'

    def create_user(self):
        """Create the account with NetBSD useradd (max 16 supplementary groups)."""
        cmd = [self.module.get_bin_path('useradd', True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None:
            groups = self.get_groups_set()
            # NetBSD caps supplementary group membership at 16
            if len(groups) > 16:
                self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.password is not None:
            cmd.append('-p')
            cmd.append(self.password)

        if self.createhome:
            cmd.append('-m')

        cmd.append(self.name)
        return self.execute_command(cmd)

    def remove_user_userdel(self):
        """Delete the account with userdel (NetBSD has no force option)."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)
        return self.execute_command(cmd)

    def modify_user(self):
        """Change the account with usermod, only passing what differs."""
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)

            if self.non_unique:
                cmd.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            groups = []

            if self.groups == '':
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)

                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                # -G replaces the set, so merge in current
                                # groups to emulate "append"
                                groups = set(current_groups).union(groups)
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                if len(groups) > 16:
                    self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
                cmd.append('-G')
                cmd.append(','.join(groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.login_class is not None:
            cmd.append('-L')
            cmd.append(self.login_class)

        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            cmd.append('-p')
            cmd.append(self.password)

        # skip if no changes to be made
        if len(cmd) == 1:
            return (None, '', '')
        elif self.module.check_mode:
            return (0, '', '')

        cmd.append(self.name)
        return self.execute_command(cmd)
# ===========================================
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
if self.module.check_mode:
return (0, '', '')
else:
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.extend(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.module.check_mode:
return (0, '', '')
else:
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
rc = 0
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
# ===========================================
class AIX(User):
    """
    This is a AIX User manipulation class.

    This overrides the following methods from the generic class:-
      - create_user()
      - remove_user()
      - modify_user()
    """

    platform = 'AIX'
    distribution = None
    SHADOWFILE = '/etc/security/passwd'

    def remove_user(self):
        """Delete the account with userdel (-r also removes the home dir)."""
        cmd = [self.module.get_bin_path('userdel', True)]
        if self.remove:
            cmd.append('-r')
        cmd.append(self.name)

        return self.execute_command(cmd)

    def create_user_useradd(self, command_name='useradd'):
        """Create the account with useradd, then set the password via chpasswd."""
        cmd = [self.module.get_bin_path(command_name, True)]

        if self.uid is not None:
            cmd.append('-u')
            cmd.append(self.uid)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            cmd.append('-g')
            cmd.append(self.group)

        if self.groups is not None and len(self.groups):
            groups = self.get_groups_set()
            cmd.append('-G')
            cmd.append(','.join(groups))

        if self.comment is not None:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None:
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None:
            cmd.append('-s')
            cmd.append(self.shell)

        if self.createhome:
            cmd.append('-m')

        cmd.append(self.name)
        (rc, out, err) = self.execute_command(cmd)

        # set password with chpasswd
        if self.password is not None:
            # SECURITY NOTE(review): this builds a shell pipeline as a single
            # string (echo "<name>:<password>" | chpasswd -e -c), so shell
            # metacharacters in the name/password are interpreted by the shell.
            cmd = []
            cmd.append('echo "'+self.name+':'+self.password+'" |')
            cmd.append(self.module.get_bin_path('chpasswd', True))
            cmd.append('-e')
            cmd.append('-c')
            self.execute_command(' '.join(cmd))

        return (rc, out, err)

    def modify_user_usermod(self):
        """Change the account with usermod; password changes go via chpasswd."""
        cmd = [self.module.get_bin_path('usermod', True)]
        info = self.user_info()

        if self.uid is not None and info[2] != int(self.uid):
            cmd.append('-u')
            cmd.append(self.uid)

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if info[3] != ginfo[2]:
                cmd.append('-g')
                cmd.append(self.group)

        if self.groups is not None:
            current_groups = self.user_group_membership()
            groups_need_mod = False
            groups = []

            if self.groups == '':
                if current_groups and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set()
                group_diff = set(current_groups).symmetric_difference(groups)

                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                cmd.append('-G')
                cmd.append(','.join(groups))

        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)

        if self.home is not None and info[5] != self.home:
            if self.move_home:
                cmd.append('-m')
            cmd.append('-d')
            cmd.append(self.home)

        if self.shell is not None and info[6] != self.shell:
            cmd.append('-s')
            cmd.append(self.shell)

        # skip if no changes to be made
        if len(cmd) == 1:
            (rc, out, err) = (None, '', '')
        elif self.module.check_mode:
            # NOTE(review): returns True for rc here, unlike the 0 used by
            # the other platforms' check-mode paths
            return (True, '', '')
        else:
            cmd.append(self.name)
            (rc, out, err) = self.execute_command(cmd)

        # set password with chpasswd
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            # SECURITY NOTE(review): shell string built from name/password,
            # same caveat as in create_user_useradd() above.
            cmd = []
            cmd.append('echo "'+self.name+':'+self.password+'" |')
            cmd.append(self.module.get_bin_path('chpasswd', True))
            cmd.append('-e')
            cmd.append('-c')
            (rc2, out2, err2) = self.execute_command(' '.join(cmd))
        else:
            (rc2, out2, err2) = (None, '', '')

        # prefer the usermod result; fall back to the chpasswd result
        if rc != None:
            return (rc, out+out2, err+err2)
        else:
            return (rc2, out+out2, err+err2)
# ===========================================
def main():
    """Entry point: parse module options, dispatch to the platform User
    subclass, and report state changes back to Ansible."""
    ssh_defaults = {
        'bits': '2048',
        'type': 'rsa',
        'passphrase': None,
        'comment': 'ansible-generated'
    }
    module = AnsibleModule(
        argument_spec = dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, aliases=['user'], type='str'),
            uid=dict(default=None, type='str'),
            non_unique=dict(default='no', type='bool'),
            group=dict(default=None, type='str'),
            groups=dict(default=None, type='str'),
            comment=dict(default=None, type='str'),
            home=dict(default=None, type='str'),
            shell=dict(default=None, type='str'),
            password=dict(default=None, type='str'),
            login_class=dict(default=None, type='str'),
            # following options are specific to userdel
            force=dict(default='no', type='bool'),
            remove=dict(default='no', type='bool'),
            # following options are specific to useradd
            createhome=dict(default='yes', type='bool'),
            system=dict(default='no', type='bool'),
            # following options are specific to usermod
            move_home=dict(default='no', type='bool'),
            append=dict(default='no', type='bool'),
            # following are specific to ssh key generation
            generate_ssh_key=dict(type='bool'),
            ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'),
            ssh_key_type=dict(default=ssh_defaults['type'], type='str'),
            ssh_key_file=dict(default=None, type='str'),
            ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
            ssh_key_passphrase=dict(default=None, type='str'),
            update_password=dict(default='always',choices=['always','on_create'],type='str')
        ),
        supports_check_mode=True
    )

    # User.__new__ picks the right platform subclass
    user = User(module)

    if user.syslogging:
        syslog.openlog('ansible-%s' % os.path.basename(__file__))
        syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform)
        if user.distribution:
            syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution)

    # rc stays None when no command was run -> reported as changed=False
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = user.name
    result['state'] = user.state
    if user.state == 'absent':
        if user.user_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = user.remove_user()
            if rc != 0:
                module.fail_json(name=user.name, msg=err, rc=rc)
        result['force'] = user.force
        result['remove'] = user.remove
    elif user.state == 'present':
        if not user.user_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = user.create_user()
            result['system'] = user.system
            result['createhome'] = user.createhome
        else:
            # modify user (note: this function is check mode aware)
            (rc, out, err) = user.modify_user()
            result['append'] = user.append
            result['move_home'] = user.move_home
        if rc is not None and rc != 0:
            module.fail_json(name=user.name, msg=err, rc=rc)
        if user.password is not None:
            result['password'] = 'NOT_LOGGING_PASSWORD'

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    if user.user_exists():
        info = user.user_info()
        if info == False:
            result['msg'] = "failed to look up user name: %s" % user.name
            result['failed'] = True
        result['uid'] = info[2]
        result['group'] = info[3]
        result['comment'] = info[4]
        result['home'] = info[5]
        result['shell'] = info[6]
        # NOTE(review): duplicate of the result['uid'] assignment above
        result['uid'] = info[2]
        if user.groups is not None:
            result['groups'] = user.groups

        # deal with ssh key
        if user.sshkeygen:
            (rc, out, err) = user.ssh_key_gen()
            if rc is not None and rc != 0:
                module.fail_json(name=user.name, msg=err, rc=rc)
            if rc == 0:
                result['changed'] = True
            (rc, out, err) = user.ssh_key_fingerprint()
            if rc == 0:
                result['ssh_fingerprint'] = out.strip()
            else:
                result['ssh_fingerprint'] = err.strip()
            result['ssh_key_file'] = user.get_ssh_key_path()
            result['ssh_public_key'] = user.get_ssh_public_key()

        # handle missing homedirs
        info = user.user_info()
        if user.home is None:
            user.home = info[5]
        if not os.path.exists(user.home) and user.createhome:
            if not module.check_mode:
                user.create_homedir(user.home)
                user.chown_homedir(info[2], info[3], user.home)
            result['changed'] = True

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
pthaike/SFrame | cxxtest/python/setup.py | 59 | 1900 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------

"""
Script to generate the installer for cxxtest.
"""

# Trove classifiers for the package; blank lines are filtered out below.
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: End Users/Desktop
License :: OSI Approved :: LGPL License
Natural Language :: English
Operating System :: Microsoft :: Windows
Operating System :: Unix
Programming Language :: Python
Topic :: Software Development :: Libraries :: Python Modules
"""

import os
import sys
from os.path import realpath, dirname

# On Python 3, use the pre-converted sources under python3/.
if sys.version_info >= (3,0):
    sys.path.insert(0, dirname(realpath(__file__))+os.sep+'python3')
    os.chdir('python3')

import cxxtest

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

doclines = cxxtest.__doc__.split("\n")

setup(name="cxxtest",
      version=cxxtest.__version__,
      maintainer=cxxtest.__maintainer__,
      maintainer_email=cxxtest.__maintainer_email__,
      url = cxxtest.__url__,
      license = cxxtest.__license__,
      platforms = ["any"],
      description = doclines[0],
      # BUGFIX: on Python 3 (which this script explicitly supports) filter()
      # returns a lazy iterator, not a list; distutils/setuptools metadata
      # handling expects a list of classifier strings.
      classifiers = list(filter(None, classifiers.split("\n"))),
      long_description = "\n".join(doclines[2:]),
      packages=['cxxtest'],
      keywords=['utility'],
      scripts=['scripts/cxxtestgen']
      #
      # The entry_points option is not supported by distutils.core
      #
      #entry_points="""
      #[console_scripts]
      #cxxtestgen = cxxtest.cxxtestgen:main
      #"""
      )
| bsd-3-clause |
dischinator/pyload | module/plugins/hoster/DataportCz.py | 5 | 2321 | # -*- coding: utf-8 -*-
from module.plugins.internal.SimpleHoster import SimpleHoster
class DataportCz(SimpleHoster):
__name__ = "DataportCz"
__type__ = "hoster"
__version__ = "0.46"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?dataport\.cz/file/(.+)'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium" , "bool", "Use premium account if available" , True),
("fallback" , "bool", "Fallback to free download if premium fails" , True),
("chk_filesize", "bool", "Check file size" , True),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Dataport.cz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
NAME_PATTERN = r'<span itemprop="name">(?P<N>.+?)</span>'
SIZE_PATTERN = r'<td class="fil">Velikost</td>\s*<td>(?P<S>[^<]+)</td>'
OFFLINE_PATTERN = r'<h2>Soubor nebyl nalezen</h2>'
CAPTCHA_PATTERN = r'<section id="captcha_bg">\s*<img src="(.*?)"'
FREE_SLOTS_PATTERN = ur'Počet volných slotů: <span class="darkblue">(\d+)</span><br />'
def handle_free(self, pyfile):
captchas = {'1': "jkeG", '2': "hMJQ", '3': "vmEK", '4': "ePQM", '5': "blBd"}
action, inputs = self.parse_html_form('free_download_form')
self.log_debug(action, inputs)
if not action or not inputs:
self.error(_("free_download_form"))
if "captchaId" in inputs and inputs['captchaId'] in captchas:
inputs['captchaCode'] = captchas[inputs['captchaId']]
else:
self.error(_("Captcha not found"))
self.download("http://www.dataport.cz%s" % action, post=inputs)
check = self.scan_download({'captcha': 'alert("\u0160patn\u011b opsan\u00fd k\u00f3d z obr\u00e1zu");',
'slot' : 'alert("Je n\u00e1m l\u00edto, ale moment\u00e1ln\u011b nejsou'})
if check == "captcha":
self.retry_captcha()
elif check == "slot":
self.log_debug("No free slots - wait 60s and retry")
self.retry(wait=60)
| gpl-3.0 |
apache/airflow | airflow/contrib/operators/gcs_to_bq.py | 3 | 1695 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.google.cloud.transfers.gcs_to_bigquery`."""
import warnings
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.transfers.gcs_to_bigquery`.",
DeprecationWarning,
stacklevel=2,
)
class GoogleCloudStorageToBigQueryOperator(GCSToBigQueryOperator):
    """
    This class is deprecated.

    Please use `airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator`.
    """

    def __init__(self, *args, **kwargs):
        # The replacement path must match the module actually imported above
        # (gcs_to_bigquery); the old message pointed at the non-existent
        # `transfers.gcs_to_bq` module.
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.transfers.gcs_to_bigquery.GCSToBigQueryOperator`.""",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| apache-2.0 |
CloudWareChile/OpenChile | openerp/addons/l10n_fr/__init__.py | 14 | 1444 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import l10n_fr
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anomeome/source | tools/b43-tools/files/b43-fwsquash.py | 494 | 4767 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
	"""Print the command-line help for the firmware squasher to stdout."""
	lines = [
		"Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0],
		"",
		"PHYTYPES is a comma separated list of:",
		"A => A-PHY",
		"AG => Dual A-PHY G-PHY",
		"G => G-PHY",
		"LP => LP-PHY",
		"N => N-PHY",
		"HT => HT-PHY",
		"LCN => LCN-PHY",
		"LCN40 => LCN40-PHY",
		"AC => AC-PHY",
		"",
		"COREREVS is a comma separated list of core revision numbers.",
	]
	print("\n".join(lines))
# Validate the three positional arguments: PHYTYPES COREREVS FWPATH.
if len(sys.argv) != 4:
	usage()
	sys.exit(1)

phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]

phytypes = phytypes.split(',')
try:
	# Materialize a list: under Python 3, map() returns a one-shot iterator
	# that would be exhausted after the first revs_match() call below.
	corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
	print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
	usage()
	sys.exit(1)

fwfiles = os.listdir(fwpath)
# Materialize a list: under Python 3, a filter object is always truthy
# (defeating the emptiness check below) and can only be iterated once,
# while fwfiles is iterated twice further down.  The comprehension also
# avoids the old lambda parameter shadowing the builtin `str`.
fwfiles = [name for name in fwfiles if name.endswith(".fw")]
if not fwfiles:
	print("ERROR: No firmware files found in %s" % fwpath)
	sys.exit(1)

required_fwfiles = []
def revs_match(revs_a, revs_b):
	"""Return True when the two collections share at least one core revision."""
	return any(rev in revs_b for rev in revs_a)
def phytypes_match(types_a, types_b):
	"""Return True when any entry of types_a, whitespace-stripped and
	upper-cased, occurs in types_b."""
	return any(entry.strip().upper() in types_b for entry in types_a)
# Microcode/PCM firmware files mapped to the hardware that needs them.
# Each value is a 2-tuple:
#   (tuple of supported core revision numbers, tuple of PHY type strings)
revmapping = {
	"ucode2.fw" : ( (2,3,), ("G",), ),
	"ucode4.fw" : ( (4,), ("G",), ),
	"ucode5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
	"ucode11.fw" : ( (11,12,), ("N",), ),
	"ucode13.fw" : ( (13,), ("LP","G",), ),
	"ucode14.fw" : ( (14,), ("LP",), ),
	"ucode15.fw" : ( (15,), ("LP",), ),
	"ucode16_mimo.fw" : ( (16,17,18,19,23,), ("N",), ),
#	"ucode16_lp.fw" : ( (16,17,18,19,), ("LP",), ),
	"ucode24_lcn.fw" : ( (24,), ("LCN",), ),
	"ucode25_mimo.fw" : ( (25,28,), ("N",), ),
	"ucode25_lcn.fw" : ( (25,28,), ("LCN",), ),
	"ucode26_mimo.fw" : ( (26,), ("HT",), ),
	"ucode29_mimo.fw" : ( (29,), ("HT",), ),
	"ucode30_mimo.fw" : ( (30,), ("N",), ),
	"ucode33_lcn40.fw" : ( (33,), ("LCN40",), ),
	"ucode40.fw" : ( (40,), ("AC",), ),
	"ucode42.fw" : ( (42,), ("AC",), ),
	"pcm4.fw" : ( (1,2,3,4,), ("G",), ),
	"pcm5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
	}
# Initial-values firmware files mapped to the hardware that needs them;
# same value layout as revmapping above:
#   (tuple of supported core revision numbers, tuple of PHY type strings)
initvalmapping = {
	"a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
	"a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ),
	"b0g0initvals2.fw" : ( (2,4,), ("G",), ),
	"b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
	"b0g0initvals13.fw" : ( (13,), ("G",), ),
	"n0initvals11.fw" : ( (11,12,), ("N",), ),
	"n0initvals16.fw" : ( (16,17,18,23,), ("N",), ),
	"n0initvals24.fw" : ( (24,), ("N",), ),
	"n0initvals25.fw" : ( (25,28,), ("N",), ),
	"n16initvals30.fw" : ( (30,), ("N",), ),
	"lp0initvals13.fw" : ( (13,), ("LP",), ),
	"lp0initvals14.fw" : ( (14,), ("LP",), ),
	"lp0initvals15.fw" : ( (15,), ("LP",), ),
#	"lp0initvals16.fw" : ( (16,17,18,), ("LP",), ),
	"lcn0initvals24.fw" : ( (24,), ("LCN",), ),
	"ht0initvals26.fw" : ( (26,), ("HT",), ),
	"ht0initvals29.fw" : ( (29,), ("HT",), ),
	"lcn400initvals33.fw" : ( (33,), ("LCN40",), ),
	"ac0initvals40.fw" : ( (40,), ("AC",), ),
	"ac1initvals42.fw" : ( (42,), ("AC",), ),
	"a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
	"a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ),
	"b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
	"n0bsinitvals11.fw" : ( (11,12,), ("N",), ),
	"n0bsinitvals16.fw" : ( (16,17,18,23,), ("N",), ),
	"n0bsinitvals24.fw" : ( (24,), ("N",), ),
	"n0bsinitvals25.fw" : ( (25,28,), ("N",), ),
	"n16bsinitvals30.fw" : ( (30,), ("N",), ),
	"lp0bsinitvals13.fw" : ( (13,), ("LP",), ),
	"lp0bsinitvals14.fw" : ( (14,), ("LP",), ),
	"lp0bsinitvals15.fw" : ( (15,), ("LP",), ),
#	"lp0bsinitvals16.fw" : ( (16,17,18,), ("LP",), ),
	"lcn0bsinitvals24.fw" : ( (24,), ("LCN",), ),
	"ht0bsinitvals26.fw" : ( (26,), ("HT",), ),
	"ht0bsinitvals29.fw" : ( (29,), ("HT",), ),
	"lcn400bsinitvals33.fw" : ( (33,), ("LCN40",), ),
	"ac0bsinitvals40.fw" : ( (40,), ("AC",), ),
	"ac1bsinitvals42.fw" : ( (42,), ("AC",), ),
	}
# Pass 1: keep a firmware file when either mapping table lists it for at
# least one requested core revision AND one requested PHY type.
# NOTE(review): under Python 3 the `corerevs`/`fwfiles` values produced by
# the argument-parsing section above are one-shot map/filter iterators;
# these loops assume real lists — confirm the script targets Python 2.
for f in fwfiles:
	if f in revmapping:
		if revs_match(corerevs, revmapping[f][0]) and\
		   phytypes_match(phytypes, revmapping[f][1]):
			required_fwfiles += [f]
		continue
	if f in initvalmapping:
		if revs_match(corerevs, initvalmapping[f][0]) and\
		   phytypes_match(phytypes, initvalmapping[f][1]):
			required_fwfiles += [f]
		continue
	print("WARNING: Firmware file %s not found in the mapping lists" % f)

# Pass 2: delete every firmware file that was not marked as required.
for f in fwfiles:
	if f not in required_fwfiles:
		print("Deleting %s" % f)
		os.unlink(fwpath + '/' + f)
| gpl-2.0 |
ncliam/serverpos | openerp/addons/base_iban/__init__.py | 447 | 1073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_iban
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pittacus/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
    """Locate libcrypto via ctypes, declare the EVP function signatures this
    module uses, and allocate the shared output buffer.

    Sets the module globals `libcrypto`, `buf` and `loaded`; raises if no
    OpenSSL library can be found.
    """
    global loaded, libcrypto, buf
    libcrypto = util.find_library(('crypto', 'eay32'),
                                  'EVP_get_cipherbyname',
                                  'libcrypto')
    if libcrypto is None:
        raise Exception('libcrypto(OpenSSL) not found')
    # Declare restype/argtypes so ctypes marshals pointers correctly on
    # platforms where the defaults (int) would truncate them.
    libcrypto.EVP_get_cipherbyname.restype = c_void_p
    libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
    libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
                                            c_char_p, c_char_p, c_int)
    libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
                                           c_char_p, c_int)
    libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
    libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
    if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
        # Register all ciphers when the symbol exists (not present in every
        # OpenSSL build).
        libcrypto.OpenSSL_add_all_ciphers()
    buf = create_string_buffer(buf_size)
    loaded = True
def load_cipher(cipher_name):
    """Fallback lookup: resolve the EVP_<name> constructor symbol directly
    from libcrypto and call it.

    Returns an EVP_CIPHER pointer, or None when the symbol is absent.
    """
    symbol = 'EVP_' + cipher_name.replace('-', '_')
    if bytes != str:
        # ctypes attribute names must be str on Python 3.
        symbol = str(symbol, 'utf-8')
    ctor = getattr(libcrypto, symbol, None)
    if not ctor:
        return None
    ctor.restype = c_void_p
    return ctor()
class OpenSSLCrypto(object):
    """Stream-cipher wrapper around OpenSSL's EVP interface via ctypes.

    `op` is passed straight to EVP_CipherInit_ex: 1 encrypts, 0 decrypts.
    """
    def __init__(self, cipher_name, key, iv, op):
        self._ctx = None
        if not loaded:
            load_openssl()
        cipher_name = common.to_bytes(cipher_name)
        cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
        if not cipher:
            # Some ciphers are not registered by name; resolve the EVP_*
            # constructor symbol directly as a fallback.
            cipher = load_cipher(cipher_name)
        if not cipher:
            raise Exception('cipher %s not found in libcrypto' % cipher_name)
        key_ptr = c_char_p(key)
        iv_ptr = c_char_p(iv)
        self._ctx = libcrypto.EVP_CIPHER_CTX_new()
        if not self._ctx:
            raise Exception('can not create cipher context')
        r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
                                        key_ptr, iv_ptr, c_int(op))
        if not r:
            self.clean()
            raise Exception('can not initialize cipher context')

    def update(self, data):
        """Encrypt/decrypt `data` and return the produced bytes.

        NOTE(review): output goes through the module-global buffer shared by
        all instances; concurrent update() calls would clobber each other —
        confirm single-threaded use.
        """
        global buf_size, buf
        cipher_out_len = c_long(0)
        l = len(data)
        if buf_size < l:
            # Grow the shared buffer geometrically so it fits the input.
            buf_size = l * 2
            buf = create_string_buffer(buf_size)
        libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
                                   byref(cipher_out_len), c_char_p(data), l)
        # buf is copied to a str object when we access buf.raw
        return buf.raw[:cipher_out_len.value]

    def __del__(self):
        # Release the EVP context when the object is garbage collected.
        self.clean()

    def clean(self):
        """Free the OpenSSL cipher context, exactly once."""
        if self._ctx:
            libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
            libcrypto.EVP_CIPHER_CTX_free(self._ctx)
            # Null the handle so a later clean()/__del__ cannot double-free
            # (e.g. after the EVP_CipherInit_ex failure path in __init__).
            self._ctx = None
# Supported cipher table: maps a cipher name to
# (key size in bytes, IV size in bytes, implementation class).
ciphers = {
    'aes-128-cfb': (16, 16, OpenSSLCrypto),
    'aes-192-cfb': (24, 16, OpenSSLCrypto),
    'aes-256-cfb': (32, 16, OpenSSLCrypto),
    'aes-128-ofb': (16, 16, OpenSSLCrypto),
    'aes-192-ofb': (24, 16, OpenSSLCrypto),
    'aes-256-ofb': (32, 16, OpenSSLCrypto),
    'aes-128-ctr': (16, 16, OpenSSLCrypto),
    'aes-192-ctr': (24, 16, OpenSSLCrypto),
    'aes-256-ctr': (32, 16, OpenSSLCrypto),
    'aes-128-cfb8': (16, 16, OpenSSLCrypto),
    'aes-192-cfb8': (24, 16, OpenSSLCrypto),
    'aes-256-cfb8': (32, 16, OpenSSLCrypto),
    'aes-128-cfb1': (16, 16, OpenSSLCrypto),
    'aes-192-cfb1': (24, 16, OpenSSLCrypto),
    'aes-256-cfb1': (32, 16, OpenSSLCrypto),
    'bf-cfb': (16, 8, OpenSSLCrypto),
    'camellia-128-cfb': (16, 16, OpenSSLCrypto),
    'camellia-192-cfb': (24, 16, OpenSSLCrypto),
    'camellia-256-cfb': (32, 16, OpenSSLCrypto),
    'cast5-cfb': (16, 8, OpenSSLCrypto),
    'des-cfb': (8, 8, OpenSSLCrypto),
    'idea-cfb': (16, 8, OpenSSLCrypto),
    'rc2-cfb': (16, 8, OpenSSLCrypto),
    'rc4': (16, 0, OpenSSLCrypto),
    'seed-cfb': (16, 16, OpenSSLCrypto),
}
# Self-tests: round-trip each cipher through the shared encrypt/decrypt
# helper in shadowsocks.crypto.util.
def run_method(method):
    # op=1 encrypts, op=0 decrypts; key/iv are fixed dummy bytes.
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)

def test_aes_128_cfb():
    run_method('aes-128-cfb')

def test_aes_256_cfb():
    run_method('aes-256-cfb')

def test_aes_128_cfb8():
    run_method('aes-128-cfb8')

def test_aes_256_ofb():
    run_method('aes-256-ofb')

def test_aes_256_ctr():
    run_method('aes-256-ctr')

def test_bf_cfb():
    run_method('bf-cfb')

def test_rc4():
    run_method('rc4')

if __name__ == '__main__':
    test_aes_128_cfb()
| apache-2.0 |
2ndQuadrant/ansible | test/units/modules/network/netact/test_netact_cm_command.py | 45 | 6140 | """
netact_cm_command unit tests
"""
# -*- coding: utf-8 -*-
# (c) 2017, Nokia
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=invalid-name,protected-access,function-redefined,unused-argument
# pylint: disable=unused-import,redundant-unittest-assert
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat import unittest
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.modules.network.netact import netact_cm_command
from units.compat.mock import patch
from units.modules.utils import set_module_args, AnsibleExitJson, AnsibleFailJson, ModuleTestCase
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so tests can capture its payload.

    Shadows the helper of the same name imported from units.modules.utils.
    """
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so tests can capture the failure.

    Shadows the helper of the same name imported from units.modules.utils.
    """
def exit_json(*args, **kwargs):
    """Stand-in for AnsibleModule.exit_json used while patching.

    Guarantees a 'changed' key (defaulting to False) and hands the result
    dict to AnsibleExitJson instead of exiting the process.
    """
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):
    """Stand-in for AnsibleModule.fail_json used while patching.

    Marks the payload as failed and raises AnsibleFailJson instead of
    exiting the process.
    """
    payload = dict(kwargs, failed=True)
    raise AnsibleFailJson(payload)
def get_bin_path(self, arg, required=False):
    """Mock of AnsibleModule.get_bin_path.

    Returns a fake path for the netact command; anything else fails the
    module when `required`, otherwise falls through returning None.
    """
    if arg.endswith('netact_cm_command'):
        return '/usr/bin/my_command'
    if required:
        fail_json(msg='%r not found !' % arg)
class TestClass(unittest.TestCase):
    """
    Test cases for the netact_cm_command module.

    AnsibleModule's exit_json/fail_json/get_bin_path are patched for the
    duration of every test, so module termination surfaces as the
    AnsibleExitJson/AnsibleFailJson exceptions defined above instead of
    exiting the interpreter.
    """
    def setUp(self):
        # Patch AnsibleModule so the module under test cannot exit the
        # process or resolve real binaries; undone automatically on cleanup.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json,
                                                 get_bin_path=get_bin_path)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)

    def test_module_fail_when_required_args_missing(self):
        """
        Testing that command is failing if args are missing
        :return:
        """
        with self.assertRaises(AnsibleFailJson):
            set_module_args({})
            netact_cm_command.main()
            self.assertTrue(False)

    def test_ensure_command_called(self):
        """
        Testing that command is executed with correct args
        :return:
        """
        set_module_args({
            'operation': "Upload",
            'opsName': 'Uploading_testi',
            'DN': "PLMN-PLMN/MRBTS-746",
        })
        with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
            stdout = 'configuration updated'
            stderr = ''
            return_code = 0
            mock_run_command.return_value = return_code, stdout, stderr  # successful execution
            with self.assertRaises(AnsibleExitJson) as result:
                netact_cm_command.main()
            print(result.exception.args)
            self.assertTrue(result.exception.args[0]['changed'])  # ensure result is changed
        # The module must invoke racclimx.sh with the operation flags in
        # exactly this order.
        mock_run_command.assert_called_once_with(
            ['/opt/oss/bin/racclimx.sh', '-op', 'Upload', '-opsName', 'Uploading_testi',
             '-DN', 'PLMN-PLMN/MRBTS-746'],
            check_rc=True)

    def test_ensure_backupPlanName_outputs_correctly(self):
        """
        Testing that command is executed with correct args
        :return:
        """
        set_module_args({
            'operation': "Provision",
            'opsName': 'Provision_test',
            'WS': "PLMN-PLMN/MRBTS-746",
            'createBackupPlan': "Yes",
            'backupPlanName': "backupPlanName"
        })
        with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
            stdout = 'configuration updated'
            stderr = ''
            return_code = 0
            mock_run_command.return_value = return_code, stdout, stderr  # successful execution
            with self.assertRaises(AnsibleExitJson) as result:
                netact_cm_command.main()
            print(result.exception.args)
            self.assertTrue(result.exception.args[0]['changed'])  # ensure result is changed
        # 'Yes' must be translated to the CLI literal 'true'.
        mock_run_command.assert_called_once_with(
            ['/opt/oss/bin/racclimx.sh', '-op', 'Provision', '-opsName', 'Provision_test',
             '-WS', 'PLMN-PLMN/MRBTS-746', '-createBackupPlan', 'true', '-backupPlanName', 'backupPlanName'],
            check_rc=True)

    def test_withwrongargs(self):
        """
        Testing that wrong attribute causing error
        :return:
        """
        set_module_args({
            'operation': "Upload",
            'opsName': 'Uploading_testi',
            'MR': "PLMN-PLMN/MRBTS-746",
            'abc': 'abc'
        })
        with self.assertRaises(AnsibleFailJson):
            with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
                stdout = 'configuration updated'
                stderr = ''
                return_code = 0
                mock_run_command.return_value = return_code, stdout, stderr  # successful execution
                with self.assertRaises(AnsibleExitJson) as result:
                    netact_cm_command.main()
                self.assertTrue(result.exception.args[0]['changed'])  # ensure result is changed
            self.assertFalse(True)  # ensure result is changed
| gpl-3.0 |
DayGitH/Python-Challenges | DailyProgrammer/20120514B.py | 1 | 2681 | """
After years of study, scientists have discovered an alien language transmitted from a faraway planet. The alien
language is very unique in that every word consists of exactly L lowercase letters. Also, there are exactly D words in
this language.
Once the dictionary of all the words in the alien language was built, the next breakthrough was to discover that the
aliens have been transmitting messages to Earth for the past decade. Unfortunately, these signals are weakened due to
the distance between our two planets and some of the words may be misinterpreted. In order to help them decipher these
messages, the scientists have asked you to devise an algorithm that will determine the number of possible
interpretations for a given pattern.
A pattern consists of exactly L tokens. Each token is either a single lowercase letter (the scientists are very sure
that this is the letter) or a group of unique lowercase letters surrounded by parenthesis ( and ). For example:
(ab)d(dc) means the first letter is either a or b, the second letter is definitely d and the last letter is either d or
c. Therefore, the pattern (ab)d(dc) can stand for either one of these 4 possibilities: add, adc, bdd, bdc.
Please note that sample i/p and o/p is given in the link below
Link [https://code.google.com/codejam/contest/90101/dashboard#s=p0]
"""
def extract(d):
    """Split the raw token list into (L, D, N, dictionary words, patterns).

    Layout of *d*: three integer headers (word length L, dictionary size D,
    number of test cases N) followed by D dictionary words and then the
    test-case patterns.
    """
    word_len, dict_size, num_cases = (int(tok) for tok in d[:3])
    body = d[3:]
    return word_len, dict_size, num_cases, body[:dict_size], body[dict_size:]
def separate(l):
    """Tokenize an alien-language pattern into per-position letter options.

    Each element of the result is a string of admissible letters for one
    position: a single character for a fixed letter, or the letters that
    appeared inside a (...) group.  E.g. '(ab)d(dc)' -> ['ab', 'd', 'dc'].
    """
    tokens = []
    group = ''        # letters collected inside the current (...) group
    in_group = False  # True while between '(' and ')'
    for ch in l:
        if ch == '(':
            in_group = True
            group = ''
        elif ch == ')':
            in_group = False
            tokens.append(group)
        elif in_group:
            group += ch
        else:
            tokens.append(ch)
    return tokens
def compare(length, i_list, w_list):
    """Count dictionary words matching a tokenized pattern.

    i_list[m] holds the letters allowed at position m; a word w matches
    when w[m] is among i_list[m] for every one of the `length` positions.
    """
    return sum(
        1
        for w in w_list
        if all(w[m] in i_list[m] for m in range(length))
    )
def main():
    """Read the Code Jam input file and print one 'Case #i: count' line per
    test-case pattern."""
    with open('A-large-practice.in') as f:
        # Whitespace-split tokens: headers, dictionary words, then patterns.
        data = f.read().split()
    L, D, N, w_list, inp = extract(data)
    for n, i in enumerate(inp):
        # Replace the raw pattern string with its tokenized form in place.
        inp[n] = separate(i)
        out = compare(L, inp[n], w_list)
        print('Case #{}: {}'.format(n+1, out))

if __name__ == "__main__":
    main()
| mit |
hijiangtao/statePrediction | uniGridDistribution-speedup.py | 1 | 2353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import time
import logging
import getopt
from multiprocessing import Process
from util.preprocess import mergeMultiProcessMatFiles
from util.UniGridDisOnlyPoints import UniGridDisOnlyPoints
def processTask(x, city, directory, inum, onum, GRIDSNUM, subopath):
	"""Worker entry point: assemble the configuration for worker *x* and run
	one UniGridDisOnlyPoints job (executed in a child process)."""
	UniGridDisOnlyPoints({
		'INDEX': x,
		'CITY': city,
		'DIRECTORY': directory,
		'INUM': inum,
		'ONUM': onum,
		'GRIDSNUM': GRIDSNUM,
		'SUBOPATH': subopath
	}).run()
def usage():
	"""
	Print command-line usage guidance for this script.
	"""
	print '''Usage Guidance
	help -h get usage guidance
	city -c city or region name, such as beijing
	directory -d the root directory of records and results, such as /China/beijing
	inum -i number of input files
	onum -o number of output files
	'''
def main(argv):
	"""
	Main entry point.

	:param argv: command-line arguments: city is the city/region name,
		directory the data root path, inum the total number of input files,
		onum the total number of output files, jnum the number of worker
		processes (usually equal to onum).
		NOTE(review): subopath (results sub-directory) is mentioned but has
		no getopt option, so it always keeps its default — confirm intent.
	"""
	try:
		opts, args = getopt.getopt(argv, "hc:d:i:o:j:", ["help", "city=", 'directory=', 'inum=', 'onum=', 'jnum='])
	except getopt.GetoptError as err:
		print str(err)
		usage()
		sys.exit(2)
	city, directory, inum, onum, jnum, subopath = 'beijing', '/home/tao.jiang/datasets/JingJinJi/records', 86, 20, 20, 'bj-newvis'
	for opt, arg in opts:
		if opt == '-h':
			usage()
			sys.exit()
		elif opt in ("-c", "--city"):
			city = arg
		elif opt in ("-d", "--directory"):
			directory = arg
		elif opt in ('-i', '--inum'):
			inum = int(arg)
		elif opt in ('-o', '--onum'):
			onum = int(arg)
		elif opt in ('-j', '--jnum'):
			jnum = int(arg)
	STARTTIME = time.time()
	print "Start approach at %s" % STARTTIME
	# Fixed total number of grid cells
	GRIDSNUM = 374826
	# @Multi-process run START
	jobs = []
	for x in xrange(0, jnum):
		jobs.append(Process(target=processTask, args=(x, city, directory, inum, onum, GRIDSNUM, subopath)))
		jobs[x].start()
	for job in jobs:
		job.join()
	# Flush the remaining data into files
	# Merge the per-process result files
	mergeMultiProcessMatFiles(directory, subopath, jnum)
	# @Multi-process run END
	print "END TIME: %s" % time.time()

if __name__ == '__main__':
	logging.basicConfig(filename='logger-ugd-speedup.log', level=logging.DEBUG)
	main(sys.argv[1:])
sorig/shogun | examples/undocumented/python/so_multiclass.py | 6 | 1584 | #!/usr/bin/env python
import numpy as np
def gen_data(num_classes, num_samples, dim):
	"""Generate a deterministic toy Gaussian dataset for multiclass tests.

	Draws num_samples points per cluster from three skewed Gaussians placed
	at fixed offsets, and returns (X, Y) with Y holding labels 0/1/2.
	NOTE: the covariance/offset tables are hard-coded for three clusters,
	so the data always contains 3 classes regardless of num_classes.
	"""
	np.random.seed(0)  # fixed seed => reproducible data
	covs = np.array([[[0., -1. ], [2.5, .7]],
		[[3., -1.5], [1.2, .3]],
		[[ 2, 0 ], [ .0, 1.5 ]]])
	offsets = np.array([[0, 10], [-10, -10], [10, -10]])
	blocks = [np.dot(np.random.randn(num_samples, dim), cov) + off
		for cov, off in zip(covs, offsets)]
	X = np.concatenate(blocks, axis=0)
	Y = np.repeat(np.arange(3.0), num_samples)
	return X, Y
# Number of classes
M = 3
# Number of samples of each class
N = 50
# Dimension of the data
dim = 2
# Pre-generated training data shared by the example runner below.
traindat, label_traindat = gen_data(M,N,dim)
# Default argument sets consumed when the example is run as a script.
parameter_list = [[traindat,label_traindat]]
def so_multiclass (fm_train_real=traindat,label_train_multiclass=label_traindat):
	"""Train a Mosek-based structured-output multiclass SVM on the toy data
	and print its training accuracy.  Prints a notice and returns early when
	the Mosek-enabled shogun build is unavailable.

	NOTE(review): `xrange` makes this example Python 2-only.
	"""
	try:
		from shogun import RealFeatures
		from shogun import MulticlassModel, MulticlassSOLabels, PrimalMosekSOSVM, RealNumber
	except ImportError:
		print("Mosek not available")
		return

	labels = MulticlassSOLabels(label_train_multiclass)
	features = RealFeatures(fm_train_real.T)

	model = MulticlassModel(features, labels)
	sosvm = PrimalMosekSOSVM(model, labels)
	sosvm.train()

	out = sosvm.apply()
	# Count training samples whose predicted label matches the ground truth.
	count = 0
	for i in xrange(out.get_num_labels()):
		yi_pred = RealNumber.obtain_from_generic(out.get_label(i))
		if yi_pred.value == label_train_multiclass[i]:
			count = count + 1

	print("Correct classification rate: %0.2f" % ( 100.0*count/out.get_num_labels() ))

if __name__=='__main__':
	print('SO multiclass')
	so_multiclass(*parameter_list[0])
| bsd-3-clause |
risd-cs/codedrop-twitterbot | yaml/nodes.py | 985 | 1440 |
class Node(object):
    """Base class for YAML representation-graph nodes.

    A node carries its resolved tag, its value, and the marks delimiting
    where it came from in the source stream.
    """

    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        return '%s(tag=%r, value=%s)' % (
            self.__class__.__name__, self.tag, repr(self.value))
class ScalarNode(Node):
    """Leaf node holding a single scalar value.

    `style` records the scalar presentation style used in the stream.
    """
    id = 'scalar'

    def __init__(self, tag, value,
                 start_mark=None, end_mark=None, style=None):
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.style = style
class CollectionNode(Node):
    """Shared base for sequence and mapping nodes; adds `flow_style`,
    which records whether the collection used flow or block notation."""

    def __init__(self, tag, value,
                 start_mark=None, end_mark=None, flow_style=None):
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.flow_style = flow_style
# Concrete collection nodes, distinguished only by their `id` string.
class SequenceNode(CollectionNode):
    id = 'sequence'
class MappingNode(CollectionNode):
    id = 'mapping'
| mit |
Fusion-Rom/android_external_chromium_org | tools/telemetry/telemetry/core/possible_browser.py | 25 | 2112 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class PossibleBrowser(object):
  """A browser that can be controlled.

  Call Create() to launch the browser and begin manipulating it..
  """

  def __init__(self, browser_type, target_os, finder_options,
               supports_tab_control):
    self._browser_type = browser_type
    self._target_os = target_os
    self._finder_options = finder_options
    self._supports_tab_control = supports_tab_control
    # Created lazily by _InitPlatformIfNeeded() (subclass responsibility).
    self._platform = None
    self._platform_backend = None
    # Web Page Replay settings; configured via SetReplayArchivePath().
    self._archive_path = None
    self._append_to_existing_wpr = False
    self._make_javascript_deterministic = True
    # Set via SetCredentialsPath().
    self._credentials_path = None

  def __repr__(self):
    return 'PossibleBrowser(browser_type=%s)' % self.browser_type

  @property
  def browser_type(self):
    # The browser flavor string passed at construction.
    return self._browser_type

  @property
  def target_os(self):
    """Target OS, the browser will run on."""
    return self._target_os

  @property
  def finder_options(self):
    return self._finder_options

  @property
  def supports_tab_control(self):
    return self._supports_tab_control

  @property
  def platform(self):
    # Raises NotImplementedError unless a subclass implements
    # _InitPlatformIfNeeded().
    self._InitPlatformIfNeeded()
    return self._platform

  def _InitPlatformIfNeeded(self):
    raise NotImplementedError()

  def Create(self):
    raise NotImplementedError()

  def SupportsOptions(self, finder_options):
    """Tests for extension support."""
    raise NotImplementedError()

  def IsRemote(self):
    return False

  def RunRemote(self):
    pass

  def UpdateExecutableIfNeeded(self):
    pass

  def last_modification_time(self):
    # Sentinel meaning "unknown"; subclasses may override.
    return -1

  def SetReplayArchivePath(self, archive_path, append_to_existing_wpr,
                           make_javascript_deterministic):
    self._archive_path = archive_path
    self._append_to_existing_wpr = append_to_existing_wpr
    self._make_javascript_deterministic = make_javascript_deterministic

  def SetCredentialsPath(self, credentials_path):
    self._credentials_path = credentials_path
| bsd-3-clause |
idealo/mongo-connector | mongo_connector/mongo_connector.py | 4 | 20079 | # Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file will be used with PyPi in order to package and distribute the final
# product.
"""Discovers the mongo cluster and starts the connector.
"""
import logging
import logging.handlers
import oplog_manager
import optparse
import os
import pymongo
import re
import shutil
import sys
import threading
import time
import util
import imp
from locking_dict import LockingDict
try:
from pymongo import MongoClient as Connection
except ImportError:
from pymongo import Connection
try:
import simplejson as json
except ImportError:
import json
class Connector(threading.Thread):
    """Discovers the mongo cluster and tails the oplog of every primary.

    One OplogThread is spawned per replica set (a single one for an
    unsharded setup, one per shard for a sharded cluster).  Each thread
    forwards document changes to the target system through a DocManager,
    and the connector periodically persists every thread's oplog read
    position to ``oplog_checkpoint`` so syncing can resume on restart.
    """

    def __init__(self, address, oplog_checkpoint, target_url, ns_set,
                 u_key, auth_key, doc_manager=None, auth_username=None):
        """Set up the connector (does not start any threads yet).

        address          -- host:port of the mongos (sharded setups) or
                            of the primary (replica set setups).
        oplog_checkpoint -- path of the oplog progress file, or None to
                            disable progress persistence.
        target_url       -- URL of the target system, or None.
        ns_set           -- list of namespaces to replicate.
        u_key            -- field used as the unique document key on the
                            target system (not necessarily mongo's).
        auth_key         -- admin password for authentication, or None.
        doc_manager      -- path of a doc manager module to load, or None
                            to use the default ``doc_manager`` package.
        auth_username    -- admin username used together with auth_key.
        """
        if doc_manager is not None:
            # Load the user-supplied doc manager module from its path.
            doc_manager = imp.load_source('DocManager', doc_manager)
        else:
            from doc_manager import DocManager
        # NOTE(review): this delay predates the rewrite; its purpose is
        # unclear from the code -- confirm whether it is still needed.
        time.sleep(1)
        super(Connector, self).__init__()

        # can_run is set to False when we join the thread
        self.can_run = True

        # The name of the file that stores the progress of the OplogThreads
        self.oplog_checkpoint = oplog_checkpoint

        # main address - either mongos for sharded setups or a primary otherwise
        self.address = address

        # The URL of the target system
        self.target_url = target_url

        # The set of relevant namespaces to consider
        self.ns_set = ns_set

        # The key that is a unique document identifier for the target system.
        # Not necessarily the mongo unique key.
        self.u_key = u_key

        # Password for authentication
        self.auth_key = auth_key

        # Username for authentication
        self.auth_username = auth_username

        # The set of OplogThreads created, keyed by shard id (0 for replsets)
        self.shard_set = {}

        # Dict of OplogThread/timestamp pairs to record progress
        self.oplog_progress = LockingDict()

        try:
            if target_url is None:
                if doc_manager is None:     # imported using from ... import
                    self.doc_manager = DocManager(unique_key=u_key)
                else:                       # imported using load_source
                    self.doc_manager = doc_manager.DocManager(unique_key=u_key)
            else:
                if doc_manager is None:
                    self.doc_manager = DocManager(self.target_url,
                                                  unique_key=u_key)
                else:
                    self.doc_manager = doc_manager.DocManager(self.target_url,
                                                              unique_key=u_key)
        except SystemError:
            logging.critical("MongoConnector: Bad target system URL!")
            self.can_run = False
            return

        # Validate the checkpoint file up front so we fail fast instead of
        # dying on the first write_oplog_progress() call.
        if self.oplog_checkpoint is not None:
            if not os.path.exists(self.oplog_checkpoint):
                info_str = "MongoConnector: Can't find OplogProgress file!"
                logging.critical(info_str)
                self.doc_manager.stop()
                self.can_run = False
            else:
                if (not os.access(self.oplog_checkpoint, os.W_OK)
                        and not os.access(self.oplog_checkpoint, os.R_OK)):
                    logging.critical("Invalid permissions on %s! Exiting" %
                                     (self.oplog_checkpoint))
                    sys.exit(1)

    def join(self):
        """ Joins thread, stops it from running
        """
        self.can_run = False
        self.doc_manager.stop()
        threading.Thread.join(self)

    def write_oplog_progress(self):
        """ Writes oplog progress to the file provided by the user.

        The current file is first renamed to a ``.backup`` copy; if
        writing the fresh file fails, the backup contents are restored.
        """
        if self.oplog_checkpoint is None:
            return None

        # write to temp file
        backup_file = self.oplog_checkpoint + '.backup'
        os.rename(self.oplog_checkpoint, backup_file)

        # for each of the threads write to file
        with open(self.oplog_checkpoint, 'w') as dest:
            with self.oplog_progress as oplog_prog:
                oplog_dict = oplog_prog.get_dict()
                for oplog, time_stamp in oplog_dict.items():
                    oplog_str = str(oplog)
                    timestamp = util.bson_ts_to_long(time_stamp)
                    json_str = json.dumps([oplog_str, timestamp])
                    try:
                        dest.write(json_str)
                    except IOError:
                        # Basically wipe the file, copy from backup.
                        dest.truncate()
                        with open(backup_file, 'r') as backup:
                            # BUG FIX: shutil.copyfile() expects two path
                            # names; the original code passed open file
                            # objects, which raises TypeError here.
                            # copyfileobj() is the file-object variant.
                            shutil.copyfileobj(backup, dest)
                        break

        os.remove(self.oplog_checkpoint + '.backup')

    def read_oplog_progress(self):
        """Reads oplog progress from the file provided by the user.

        This method is only called once, before any threads are spawned,
        so the progress dict is filled without taking its lock.
        """
        if self.oplog_checkpoint is None:
            return None

        # Check for empty file
        try:
            if os.stat(self.oplog_checkpoint).st_size == 0:
                logging.info("MongoConnector: Empty oplog progress file.")
                return None
        except OSError:
            return None

        try:
            with open(self.oplog_checkpoint, 'r') as source:
                data = json.load(source)
        except ValueError:      # invalid/corrupt JSON, e.g. half-written file
            reason = "It may be empty or corrupt."
            logging.info("MongoConnector: Can't read oplog progress file. %s" %
                         (reason))
            return None

        # Progress is stored as a flat list alternating between the str()
        # of an oplog identifier and its timestamp encoded as a long.
        oplog_dict = self.oplog_progress.get_dict()
        for count in range(0, len(data), 2):
            oplog_str = data[count]
            time_stamp = data[count + 1]
            # stored as a long; convert back to a bson timestamp
            oplog_dict[oplog_str] = util.long_to_bson_ts(time_stamp)

    def run(self):
        """Discovers the mongo cluster and creates a thread for each primary.
        """
        main_conn = Connection(self.address)
        if self.auth_key is not None:
            main_conn['admin'].authenticate(self.auth_username, self.auth_key)
        self.read_oplog_progress()
        conn_type = None

        # "isdbgrid" only succeeds against a mongos; failure means we are
        # talking to a plain replica set primary.
        try:
            main_conn.admin.command("isdbgrid")
        except pymongo.errors.OperationFailure:
            conn_type = "REPLSET"

        if conn_type == "REPLSET":
            # non-sharded configuration: tail the single replica set
            oplog_coll = main_conn['local']['oplog.rs']

            prim_admin = main_conn.admin
            repl_set = prim_admin.command("replSetGetStatus")['set']

            oplog = oplog_manager.OplogThread(
                main_conn,
                (main_conn.host + ":" + str(main_conn.port)),
                oplog_coll,
                False, self.doc_manager,
                self.oplog_progress,
                self.ns_set, self.auth_key,
                self.auth_username,
                repl_set=repl_set)
            self.shard_set[0] = oplog
            logging.info('MongoConnector: Starting connection thread %s' %
                         main_conn)
            oplog.start()

            # Supervise the single thread, checkpointing once a second.
            while self.can_run:
                if not self.shard_set[0].running:
                    logging.error("MongoConnector: OplogThread"
                                  " %s unexpectedly stopped! Shutting down" %
                                  (str(self.shard_set[0])))
                    self.oplog_thread_join()
                    self.doc_manager.stop()
                    return

                self.write_oplog_progress()
                time.sleep(1)

        else:       # sharded cluster
            while self.can_run is True:
                # Re-scan config.shards each pass so newly added shards
                # get their own OplogThread.
                for shard_doc in main_conn['config']['shards'].find():
                    shard_id = shard_doc['_id']
                    if shard_id in self.shard_set:
                        # Known shard: just supervise and checkpoint.
                        if not self.shard_set[shard_id].running:
                            logging.error("MongoConnector: OplogThread"
                                          " %s unexpectedly stopped! Shutting down" %
                                          (str(self.shard_set[shard_id])))
                            self.oplog_thread_join()
                            self.doc_manager.stop()
                            return
                        self.write_oplog_progress()
                        time.sleep(1)
                        continue
                    try:
                        repl_set, hosts = shard_doc['host'].split('/')
                    except ValueError:
                        # Standalone shard hosts have no "replset/" prefix.
                        cause = "The system only uses replica sets!"
                        logging.error("MongoConnector: %s", cause)
                        self.oplog_thread_join()
                        self.doc_manager.stop()
                        return
                    shard_conn = Connection(hosts, replicaset=repl_set)
                    oplog_coll = shard_conn['local']['oplog.rs']
                    oplog = oplog_manager.OplogThread(shard_conn, self.address,
                                                      oplog_coll, True,
                                                      self.doc_manager,
                                                      self.oplog_progress,
                                                      self.ns_set,
                                                      self.auth_key,
                                                      self.auth_username)
                    self.shard_set[shard_id] = oplog
                    msg = "Starting connection thread"
                    logging.info("MongoConnector: %s %s" % (msg, shard_conn))
                    oplog.start()

        self.oplog_thread_join()

    def oplog_thread_join(self):
        """Stops all the OplogThreads
        """
        logging.info('MongoConnector: Stopping all OplogThreads')
        for thread in self.shard_set.values():
            thread.join()
def main():
    """Starts the mongo connector from the command line.

    Parses CLI options, configures logging (console or syslog), reads the
    optional authentication key, then starts a Connector thread and waits
    for it to finish or for a keyboard interrupt.
    """
    parser = optparse.OptionParser()

    #-m is for the main address, which is a host:port pair, ideally of the
    #mongos. For non sharded clusters, it can be the primary.
    # NOTE(review): the default port 27217 looks like a typo for mongo's
    # standard 27017, but the help text repeats it -- confirm before changing.
    parser.add_option("-m", "--main", action="store", type="string",
                      dest="main_addr", default="localhost:27217",
                      help="""Specify the main address, which is a"""
                      """ host:port pair. For sharded clusters, this"""
                      """ should be the mongos address. For individual"""
                      """ replica sets, supply the address of the"""
                      """ primary. For example, `-m localhost:27217`"""
                      """ would be a valid argument to `-m`. Don't use"""
                      """ quotes around the address""")

    #-o is to specify the oplog-config file. This file is used by the system
    #to store the last timestamp read on a specific oplog. This allows for
    #quick recovery from failure.
    parser.add_option("-o", "--oplog-ts", action="store", type="string",
                      dest="oplog_config", default="config.txt",
                      help="""Specify the name of the file that stores the"""
                      """oplog progress timestamps. """
                      """This file is used by the system to store the last"""
                      """timestamp read on a specific oplog. This allows"""
                      """ for quick recovery from failure. By default this"""
                      """ is `config.txt`, which starts off empty. An empty"""
                      """ file causes the system to go through all the mongo"""
                      """ oplog and sync all the documents. Whenever the """
                      """cluster is restarted, it is essential that the """
                      """oplog-timestamp config file be emptied - otherwise"""
                      """ the connector will miss some documents and behave"""
                      """incorrectly.""")

    #-t is to specify the URL to the target system being used.
    parser.add_option("-t", "--target-url", action="store", type="string",
                      dest="url", default=None,
                      help="""Specify the URL to the target system being """
                      """used. For example, if you were using Solr out of """
                      """the box, you could use '-t """
                      """ http://localhost:8080/solr' with the """
                      """ SolrDocManager to establish a proper connection."""
                      """ Don't use quotes around address."""
                      """If target system doesn't need URL, don't specify""")

    #-n is to specify the namespaces we want to consider. The default
    #considers all the namespaces
    parser.add_option("-n", "--namespace-set", action="store", type="string",
                      dest="ns_set", default=None, help=
                      """Used to specify the namespaces we want to """
                      """ consider. For example, if we wished to store all """
                      """ documents from the test.test and alpha.foo """
                      """ namespaces, we could use `-n test.test,alpha.foo`."""
                      """ The default is to consider all the namespaces, """
                      """ excluding the system and config databases, and """
                      """ also ignoring the "system.indexes" collection in """
                      """any database.""")

    #-u is to specify the mongoDB field that will serve as the unique key
    #for the target system,
    parser.add_option("-u", "--unique-key", action="store", type="string",
                      dest="u_key", default="_id", help=
                      """Used to specify the mongoDB field that will serve"""
                      """as the unique key for the target system"""
                      """The default is "_id", which can be noted by """
                      """ '-u _id'""")

    #-f is to specify the authentication key file. This file is used by mongos
    #to authenticate connections to the shards, and we'll use it in the oplog
    #threads.
    parser.add_option("-f", "--password-file", action="store", type="string",
                      dest="auth_file", default=None, help=
                      """ Used to store the password for authentication."""
                      """ Use this option if you wish to specify a"""
                      """ username and password but don't want to"""
                      """ type in the password. The contents of this"""
                      """ file should be the password for the admin user.""")

    #-p is to specify the password used for authentication.
    parser.add_option("-p", "--password", action="store", type="string",
                      dest="password", default=None, help=
                      """ Used to specify the password."""
                      """ This is used by mongos to authenticate"""
                      """ connections to the shards, and in the"""
                      """ oplog threads. If authentication is not used, then"""
                      """ this field can be left empty as the default """)

    #-a is to specify the username for authentication.
    parser.add_option("-a", "--admin-username", action="store", type="string",
                      dest="admin_name", default="__system", help=
                      """Used to specify the username of an admin user to"""
                      """authenticate with. To use authentication, the user"""
                      """must specify both an admin username and a keyFile."""
                      """The default username is '__system'""")

    #-d is to specify the doc manager file.
    parser.add_option("-d", "--docManager", action="store", type="string",
                      dest="doc_manager", default=None, help=
                      """Used to specify the doc manager file that"""
                      """ is going to be used. You should send the"""
                      """ path of the file you want to be used."""
                      """ By default, it will use the """
                      """ doc_manager_simulator.py file. It is"""
                      """ recommended that all doc manager files be"""
                      """ kept in the doc_managers folder in"""
                      """ mongo-connector. For more information"""
                      """ about making your own doc manager,"""
                      """ see Doc Manager section.""")

    #-s is to enable syslog logging.
    parser.add_option("-s", "--enable-syslog", action="store_true",
                      dest="enable_syslog", default=False, help=
                      """Used to enable logging to syslog."""
                      """ Use -l to specify syslog host.""")

    #--syslog-host is to specify the syslog host.
    parser.add_option("--syslog-host", action="store", type="string",
                      dest="syslog_host", default="localhost:514", help=
                      """Used to specify the syslog host."""
                      """ The default is 'localhost:514'""")

    #--syslog-facility is to specify the syslog facility.
    parser.add_option("--syslog-facility", action="store", type="string",
                      dest="syslog_facility", default="user", help=
                      """Used to specify the syslog facility."""
                      """ The default is 'user'""")

    (options, args) = parser.parse_args()

    # Configure the root logger: either a syslog handler or stderr.
    logger = logging.getLogger()
    loglevel = logging.INFO
    logger.setLevel(loglevel)

    if options.enable_syslog:
        syslog_info = options.syslog_host.split(":")
        syslog_host = logging.handlers.SysLogHandler(
            address=(syslog_info[0], int(syslog_info[1])),
            facility=options.syslog_facility)
        syslog_host.setLevel(loglevel)
        logger.addHandler(syslog_host)
    else:
        log_out = logging.StreamHandler()
        log_out.setLevel(loglevel)
        log_out.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(log_out)

    logger.info('Beginning Mongo Connector')

    if options.doc_manager is None:
        logger.info('No doc manager specified, using simulator.')

    if options.ns_set is None:
        ns_set = []
    else:
        ns_set = options.ns_set.split(',')

    # Read the admin password either from the key file or from -p.
    key = None
    if options.auth_file is not None:
        try:
            with open(options.auth_file) as key_file:
                key = key_file.read()
            # BUG FIX: re.sub() returns the stripped string; the original
            # code discarded its result, so whitespace (e.g. the trailing
            # newline of the key file) was left in the key.
            key = re.sub(r'\s', '', key)
        except IOError:
            logger.error('Could not parse password authentication file!')
            sys.exit(1)

    if options.password is not None:
        key = options.password

    if key is None and options.admin_name != "__system":
        logger.error("Admin username specified without password!")
        sys.exit(1)

    connector = Connector(options.main_addr, options.oplog_config, options.url,
                          ns_set, options.u_key, key, options.doc_manager,
                          auth_username=options.admin_name)
    connector.start()

    # Wait for the connector; a Ctrl-C stops it cleanly via join().
    while True:
        try:
            time.sleep(3)
            if not connector.is_alive():
                break
        except KeyboardInterrupt:
            logging.info("Caught keyboard interrupt, exiting!")
            connector.join()
            break
# Script entry point: run the connector when invoked directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
anna-effeindzourou/trunk | doc/sphinx/ipython_directive.py | 8 | 18579 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. For example, the following code in your Sphinx
config file will configure this directive for the following input/output
prompts ``Yade [1]:`` and ``-> [1]:``::
import ipython_directive as id
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout=' -> [%d]:'
id.rc_override=dict(
prompt_in1="Yade [\#]:",
prompt_in2=" .\D..",
prompt_out=" -> [\#]:"
)
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt=
re.compile("(Yade \[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.output_prompt=
re.compile("(( -> )|(Out)\[[0-9]+\]: )|( \.\.\.+:)")
ich.IPythonConsoleLexer.continue_prompt=re.compile(" \.\.\.+:")
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
- Make sure %bookmarks used internally are removed on exit.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import imp
import os
import re
import shutil
import sys
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
matplotlib.use('Agg')
# Our own
import IPython
from IPython.Shell import MatplotlibShell
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Parsed sphinx version as a tuple of ints, e.g. (0, 6).
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
                        for x in sphinx_version[:2]])

# Token types emitted by block_parser().
COMMENT, INPUT, OUTPUT = range(3)

# IPython rc settings overridden by the embedding docs (see module docstring);
# reconfig_shell() must be called after changing these.
rc_override = {}

# Regexes matching input, continuation and output prompt lines; groups
# capture the prompt number and the remainder of the line.
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxcont = re.compile(' \.+:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')

# Templates used to re-render the prompts with renumbered counters.
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
# '\D' is replaced by one dot per digit of the line number at render time.
fmtcont = ' .\D.:'
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part):
    """
    part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines.  The block parser
    parses the text into a list of::

        blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]

    where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
    data is, depending on the type of token::

        COMMENT : the comment string

        INPUT: the (DECORATOR, INPUT_LINE, REST) where
            DECORATOR: the input decorator (or None)
            INPUT_LINE: the input as string (possibly multi-line)
            REST : any stdout generated by the input line (not OUTPUT)

        OUTPUT: the output string, possibly multi-line
    """

    block = []
    lines = part.split('\n')
    N = len(lines)
    i = 0
    decorator = None
    # Single pass over the lines with manual index `i` so input parsing
    # can look ahead to consume continuation/echo lines.
    while 1:

        if i==N:
            # nothing left to parse -- the last line
            break

        line = lines[i]
        i += 1
        line_stripped = line.strip()
        if line_stripped.startswith('#'):
            block.append((COMMENT, line))
            continue

        if line_stripped.startswith('@'):
            # we're assuming at most one decorator -- may need to
            # rethink
            decorator = line_stripped
            continue

        # does this look like an input line?
        matchin = rgxin.match(line)
        if matchin:
            lineno, inputline = int(matchin.group(1)), matchin.group(2)

            # the ....: continuation string
            #continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
            #Nc = len(continuation)
            # input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # echo line 'print'. The input line can only be
            # terminated by the end of the block or an output line, so
            # we parse out the rest of the input line if it is
            # multiline as well as any echo text
            rest = []
            while i<N:
                # look ahead; if the next line is blank, or a comment, or
                # an output line, we're done
                nextline = lines[i]
                matchout = rgxout.match(nextline)
                matchcont = rgxcont.match(nextline)
                #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
                if matchout or nextline.startswith('#'):
                    break
                elif matchcont: #nextline.startswith(continuation)
                    # continuation line: fold it into the input
                    inputline += '\n' + matchcont.group(1) #nextline[Nc:]
                else:
                    # echoed stdout belonging to this input
                    rest.append(nextline)
                i+= 1

            block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
            continue

        # if it looks like an output line grab all the text to the end
        # of the block
        matchout = rgxout.match(line)
        if matchout:
            lineno, output = int(matchout.group(1)), matchout.group(2)
            if i<N-1:
                output = '\n'.join([output] + lines[i:])

            block.append((OUTPUT, output))
            break

    return block
class EmbeddedSphinxShell(object):
    """An embedded IPython instance to run inside Sphinx.

    Wraps an old-style (pre-0.11) IPython shell whose terminal output is
    redirected into an internal buffer so each executed block can be
    re-rendered into the documentation.
    """

    def __init__(self):

        # Capture everything the embedded session prints.
        self.cout = cStringIO.StringIO()

        IPython.Shell.Term.cout = self.cout
        IPython.Shell.Term.cerr = self.cout
        argv = ['-autocall', '0']
        self.user_ns = {}
        self.user_glocal_ns = {}

        self.IP = IPython.ipmaker.make_IPython(
            argv, self.user_ns, self.user_glocal_ns, embedded=True,
            #shell_class=IPython.Shell.InteractiveShell,
            shell_class=MatplotlibShell,
            rc_override = dict(colors = 'NoColor', **rc_override))

        self.input = ''
        self.output = ''

        # Per-directive flags, reset by ipython_directive() on each run;
        # @verbatim/@doctest/@suppress decorators can also set them per-input.
        self.is_verbatim = False
        self.is_doctest = False
        self.is_suppress = False

        # on the first call to the savefig decorator, we'll import
        # pyplot as plt so we can make a call to the plt.gcf().savefig
        self._pyplot_imported = False

        # we need bookmark the current dir first so we can save
        # relative to it
        self.process_input_line('bookmark ipy_basedir')
        self.cout.seek(0)
        self.cout.truncate(0)

    def process_input_line(self, line):
        """process the input, capturing stdout"""
        #print "input='%s'"%self.input
        stdout = sys.stdout
        sys.stdout = self.cout
        #self.IP.resetbuffer()
        self.IP.push(self.IP.prefilter(line, 0))
        #self.IP.runlines(line)
        sys.stdout = stdout

    # Callbacks for each type of token
    def process_input(self, data, input_prompt, lineno):
        """Process data block for INPUT token.

        Executes the input (unless verbatim), renders the prompt lines and
        returns (rendered_lines, input_lines, captured_output, is_doctest,
        image_file).
        """
        decorator, input, rest = data
        image_file = None
        #print 'INPUT:', data
        is_verbatim = decorator=='@verbatim' or self.is_verbatim
        is_doctest = decorator=='@doctest' or self.is_doctest
        is_suppress = decorator=='@suppress' or self.is_suppress
        is_savefig = decorator is not None and \
                     decorator.startswith('@savefig')

        input_lines = input.split('\n')

        #continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
        #Nc = len(continuation)

        if is_savefig:
            # @savefig <filename> [key=val ...] -> build an image directive.
            saveargs = decorator.split(' ')
            filename = saveargs[1]
            outfile = os.path.join('_static/%s'%filename)
            # build out an image directive like
            # .. image:: somefile.png
            # :width 4in
            #
            # from an input like
            # savefig somefile.png width=4in
            imagerows = ['.. image:: %s'%outfile]
            for kwarg in saveargs[2:]:
                arg, val = kwarg.split('=')
                arg = arg.strip()
                val = val.strip()
                imagerows.append(' :%s: %s'%(arg, val))
            image_file = outfile
            # NOTE(review): image_directive is a local here and is never
            # returned; process_block() therefore always emits None for the
            # figure -- confirm whether figure directives are expected.
            image_directive = '\n'.join(imagerows)

        # TODO: can we get "rest" from ipython
        #self.process_input_line('\n'.join(input_lines))

        ret = []
        is_semicolon = False

        for i, line in enumerate(input_lines):
            # a trailing ';' anywhere suppresses the echoed output
            if line.endswith(';'):
                is_semicolon = True

            if i==0:
                # process the first input line
                if is_verbatim:
                    self.process_input_line('')
                else:
                    # only submit the line in non-verbatim mode
                    self.process_input_line(line)
                formatted_line = '%s %s'%(input_prompt, line)
            else:
                # process a continuation line
                if not is_verbatim:
                    self.process_input_line(line)

                formatted_line = fmtcont.replace('\D','.'*len(str(lineno)))+line #'%s %s'%(continuation, line)

            if not is_suppress:
                ret.append(formatted_line)

        if not is_suppress:
            if len(rest.strip()):
                if is_verbatim:
                    # the "rest" is the standard output of the
                    # input, which needs to be added in
                    # verbatim mode
                    ret.append(rest)

        self.cout.seek(0)
        output = self.cout.read()
        if not is_suppress and not is_semicolon:
            ret.append(output)

        self.cout.truncate(0)

        return ret, input_lines, output, is_doctest, image_file
        #print 'OUTPUT', output # dbg

    def process_output(self, data, output_prompt,
                       input_lines, output, is_doctest, image_file):
        """Process data block for OUTPUT token.

        In @doctest mode, compares the documented output against the
        actually captured output and raises RuntimeError on mismatch.
        """
        if is_doctest:
            submitted = data.strip()
            found = output
            if found is not None:
                ind = found.find(output_prompt)
                if ind<0:
                    raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
                found = found[len(output_prompt):].strip()

                if found!=submitted:
                    raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
                #print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)

    def process_comment(self, data):
        """Process data block for COMMENT token."""
        if not self.is_suppress:
            return [data]

    def process_block(self, block):
        """
        process block from the block_parser and return a list of processed lines
        """

        ret = []
        output = None
        input_lines = None

        # Current prompt counter, recovered from the live shell so the
        # rendered session numbering stays sequential.
        m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
        lineno = int(m.group(1))

        input_prompt = fmtin%lineno
        output_prompt = fmtout%lineno
        image_file = None
        image_directive = None
        # XXX - This needs a second refactor. There's too much state being
        # held globally, which makes for a very awkward interface and large,
        # hard to test functions. I've already broken this up at least into
        # three separate processors to isolate the logic better, but this only
        # serves to highlight the coupling. Next we need to clean it up...
        for token, data in block:
            if token==COMMENT:
                out_data = self.process_comment(data)
            elif token==INPUT:
                out_data, input_lines, output, is_doctest, image_file= \
                          self.process_input(data, input_prompt, lineno)
            elif token==OUTPUT:
                out_data = \
                    self.process_output(data, output_prompt,
                                        input_lines, output, is_doctest,
                                        image_file)
            if out_data:
                ret.extend(out_data)

        if image_file is not None:
            # Save the current figure relative to the doc base dir, then
            # restore the previous working directory.
            self.ensure_pyplot()
            command = 'plt.gcf().savefig("%s")'%image_file
            #print 'SAVEFIG', command # dbg
            self.process_input_line('bookmark ipy_thisdir')
            self.process_input_line('cd -b ipy_basedir')
            self.process_input_line(command)
            self.process_input_line('cd -b ipy_thisdir')
            self.cout.seek(0)
            self.cout.truncate(0)
        return ret, image_directive

    def ensure_pyplot(self):
        # NOTE(review): _pyplot_imported is never set to True, so the
        # import line is re-executed on every @savefig -- harmless but
        # presumably unintended; confirm.
        if self._pyplot_imported:
            return
        self.process_input_line('import matplotlib.pyplot as plt')
# A global instance used below. XXX: not sure why this can't be created inside
# ipython_directive itself.
shell = EmbeddedSphinxShell()

def reconfig_shell():
    """Called after setting module-level variables to re-instantiate
    with the set values (since shell is instantiated first at import-time
    when module variables have default values)"""
    global shell
    shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine,
                      ):
    """Docutils entry point for the ``ipython`` directive.

    Splits the directive content into blank-line-separated parts, runs each
    through block_parser() and the embedded shell, and inserts the
    re-rendered session as an ``.. sourcecode:: ipython`` block (plus any
    figure directives).
    """
    debug = ipython_directive.DEBUG
    # dict.has_key() is deprecated (and removed in Python 3); use `in`.
    shell.is_suppress = 'suppress' in options
    shell.is_doctest = 'doctest' in options
    shell.is_verbatim = 'verbatim' in options

    #print 'ipy', shell.is_suppress, options
    parts = '\n'.join(content).split('\n\n')
    lines = ['.. sourcecode:: ipython', '']
    figures = []

    for part in parts:
        block = block_parser(part)

        if len(block):
            rows, figure = shell.process_block(block)
            for row in rows:
                lines.extend([' %s'%line for line in row.split('\n')])

            if figure is not None:
                figures.append(figure)

    for figure in figures:
        lines.append('')
        lines.extend(figure.split('\n'))
        lines.append('')

    #print lines
    if len(lines)>2:
        if debug:
            # single-argument parenthesized form parses on Python 2 and 3
            print('\n'.join(lines))
        else:
            #print 'INSERTING %d lines'%len(lines)
            state_machine.insert_input(
                lines, state_machine.input_lines.source(0))

    return []

ipython_directive.DEBUG = False
# Enable as a proper Sphinx directive
def setup(app):
    """Register the ``ipython`` directive with a Sphinx application."""
    setup.app = app
    directive_options = {
        'suppress': directives.flag,
        'doctest': directives.flag,
        'verbatim': directives.flag,
    }
    app.add_directive('ipython', ipython_directive, True, (0, 2, 0),
                      **directive_options)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
    """Smoke test: run a set of canned ipython sessions through the
    directive in debug mode (output is printed, not inserted)."""
    # Each example is one directive body; sessions exercise plain I/O,
    # @doctest, @verbatim, @suppress, @savefig and semicolon suppression.
    examples = [
        r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
        r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
        r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
        r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
        r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
        r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
        r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
        r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
    ]

    # Debug mode prints the rendered lines instead of inserting them into
    # a (absent) state machine.
    ipython_directive.DEBUG = True
    #options = dict(suppress=True)
    options = dict()
    for example in examples:
        content = example.split('\n')
        ipython_directive('debug', arguments=None, options=options,
                          content=content, lineno=0,
                          content_offset=None, block_text=None,
                          state=None, state_machine=None,
                          )
# Run test suite as a script (prints rendered sessions in debug mode).
if __name__=='__main__':
    test()
| gpl-2.0 |
nishad-jobsglobal/odoo-marriot | addons/product/pricelist.py | 154 | 26498 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import chain
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import except_orm
import openerp.addons.decimal_precision as dp
class price_type(osv.osv):
    """
    The price type is used to point out which field in the product form
    is a price and in which currency this price is expressed.
    When a field is a price, you can use it in pricelists to base
    sale and purchase prices based on some fields of the product.
    """
    def _price_field_get(self, cr, uid, context=None):
        # Selection helper: every float field of product.product or
        # product.template is a candidate "price" field.
        mf = self.pool.get('ir.model.fields')
        ids = mf.search(cr, uid, [('model','in', (('product.product'),('product.template'))), ('ttype','=','float')], context=context)
        res = []
        for field in mf.browse(cr, uid, ids, context=context):
            # de-duplicate fields that exist on both models
            if not (field.name, field.field_description) in res:
                res.append((field.name, field.field_description))
        return res

    def _get_field_currency(self, cr, uid, fname, ctx):
        # NOTE(review): returns a res.currency browse record, whereas
        # _get_currency below returns a bare id -- confirm callers expect
        # the record here. Raises IndexError if no price type uses fname.
        ids = self.search(cr, uid, [('field','=',fname)], context=ctx)
        return self.browse(cr, uid, ids, context=ctx)[0].currency_id

    def _get_currency(self, cr, uid, ctx):
        # Default currency id: the current user's company currency, falling
        # back to the first company found in the system.
        comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
        if not comp:
            comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
            comp = self.pool.get('res.company').browse(cr, uid, comp_id)
        return comp.currency_id.id

    _name = "product.price.type"
    _description = "Price Type"
    _columns = {
        "name" : fields.char("Price Name", required=True, translate=True, help="Name of this kind of price."),
        "active" : fields.boolean("Active"),
        "field" : fields.selection(_price_field_get, "Product Field", size=32, required=True, help="Associated field in the product form."),
        "currency_id" : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
    }
    _defaults = {
        "active": lambda *args: True,
        "currency_id": _get_currency
    }
#----------------------------------------------------------
# Price lists
#----------------------------------------------------------
class product_pricelist_type(osv.osv):
    # Kind of pricelist (e.g. sale, purchase); ``key`` is the stable
    # identifier referenced from code/context, independent of the
    # translatable display name.
    _name = "product.pricelist.type"
    _description = "Pricelist Type"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'key': fields.char('Key', required=True, help="Used in the code to select specific prices based on the context. Keep unchanged."),
    }
class product_pricelist(osv.osv):
    """A pricelist: an ordered set of versioned pricing rules.

    Prices are computed by _price_rule_get_multi(): pick the version active
    at the requested date, then apply the first matching rule (items are
    ordered by sequence, then min_quantity desc).
    """

    def _pricelist_type_get(self, cr, uid, context=None):
        # Selection provider for the 'type' column: (key, name) pairs of all
        # product.pricelist.type records, ordered by name.
        pricelist_type_obj = self.pool.get('product.pricelist.type')
        pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name')
        pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context)
        res = []
        for type in pricelist_types:
            res.append((type['key'],type['name']))
        return res

    _name = "product.pricelist"
    _description = "Pricelist"
    _order = 'name'
    _columns = {
        'name': fields.char('Pricelist Name', required=True, translate=True),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the pricelist without removing it."),
        'type': fields.selection(_pricelist_type_get, 'Pricelist Type', required=True),
        'version_id': fields.one2many('product.pricelist.version', 'pricelist_id', 'Pricelist Versions', copy=True),
        'currency_id': fields.many2one('res.currency', 'Currency', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
    }

    def name_get(self, cr, uid, ids, context=None):
        # Display name is "<name> (<currency>)", e.g. "Public Pricelist (EUR)".
        result= []
        if not all(ids):
            return result
        for pl in self.browse(cr, uid, ids, context=context):
            name = pl.name + ' ('+ pl.currency_id.name + ')'
            result.append((pl.id,name))
        return result

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        if name and operator == '=' and not args:
            # search on the name of the pricelist and its currency, opposite of name_get(),
            # Used by the magic context filter in the product search view.
            query_args = {'name': name, 'limit': limit, 'lang': (context or {}).get('lang') or 'en_US'}
            # Two branches: the untranslated name and the ir_translation value
            # for the requested language, each concatenated with the currency
            # name exactly as name_get() renders it.
            query = """SELECT p.id
                       FROM ((
                                SELECT pr.id, pr.name
                                FROM product_pricelist pr JOIN
                                     res_currency cur ON
                                         (pr.currency_id = cur.id)
                                WHERE pr.name || ' (' || cur.name || ')' = %(name)s
                            )
                            UNION (
                                SELECT tr.res_id as id, tr.value as name
                                FROM ir_translation tr JOIN
                                     product_pricelist pr ON (
                                         pr.id = tr.res_id AND
                                         tr.type = 'model' AND
                                         tr.name = 'product.pricelist,name' AND
                                         tr.lang = %(lang)s
                                     ) JOIN
                                     res_currency cur ON
                                         (pr.currency_id = cur.id)
                                WHERE tr.value || ' (' || cur.name || ')' = %(name)s
                            )
                        ) p
                       ORDER BY p.name"""
            if limit:
                query += " LIMIT %(limit)s"
            cr.execute(query, query_args)
            ids = [r[0] for r in cr.fetchall()]
            # regular search() to apply ACLs - may limit results below limit in some cases
            ids = self.search(cr, uid, [('id', 'in', ids)], limit=limit, context=context)
            if ids:
                return self.name_get(cr, uid, ids, context)
        return super(product_pricelist, self).name_search(
            cr, uid, name, args, operator=operator, context=context, limit=limit)

    def _get_currency(self, cr, uid, ctx):
        # Default currency: the current user's company currency, falling back
        # to the first res.company found when the user has no company set.
        comp = self.pool.get('res.users').browse(cr, uid, uid).company_id
        if not comp:
            comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
            comp = self.pool.get('res.company').browse(cr, uid, comp_id)
        return comp.currency_id.id

    _defaults = {
        'active': lambda *a: 1,
        "currency_id": _get_currency
    }

    def price_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
        # Same as price_rule_get_multi() but keeps only the price, dropping
        # the id of the rule that produced it.
        return dict((key, dict((key, price[0]) for key, price in value.items())) for key, value in self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner, context=context).items())

    def price_rule_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
        """multi products 'price_get'.
        @param ids: pricelist ids (all pricelists when empty)
        @param products_by_qty_by_partner: list of (product, qty, partner) triples
        @param context: {
            'date': Date of the pricelist (%Y-%m-%d),}
        @return: a dict of dict with product_id as key and a dict 'price by pricelist' as value
        """
        if not ids:
            ids = self.pool.get('product.pricelist').search(cr, uid, [], context=context)
        results = {}
        for pricelist in self.browse(cr, uid, ids, context=context):
            subres = self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context)
            for product_id,price in subres.items():
                results.setdefault(product_id, {})
                results[product_id][pricelist.id] = price
        return results

    def _price_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
        # Single-pricelist variant of price_get_multi(): keeps only the price.
        return dict((key, price[0]) for key, price in self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context).items())

    def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
        # Core pricing engine for ONE pricelist: returns
        # {product_id: (price, rule_id)} for every (product, qty, partner)
        # triple; rule_id is False when no rule matched.
        context = context or {}
        date = context.get('date') or time.strftime('%Y-%m-%d')
        date = date[0:10]

        products = map(lambda x: x[0], products_by_qty_by_partner)
        currency_obj = self.pool.get('res.currency')
        product_obj = self.pool.get('product.template')
        product_uom_obj = self.pool.get('product.uom')
        price_type_obj = self.pool.get('product.price.type')

        if not products:
            return {}

        # Pick the version whose [date_start, date_end] interval covers
        # 'date' (open-ended bounds are stored as False).
        version = False
        for v in pricelist.version_id:
            if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
                version = v
                break
        if not version:
            raise osv.except_osv(_('Warning!'), _("At least one pricelist has no active version !\nPlease create or activate one."))

        # Collect the categories of all products, including every ancestor,
        # so that category-scoped rules can be fetched in a single query.
        categ_ids = {}
        for p in products:
            categ = p.categ_id
            while categ:
                categ_ids[categ.id] = True
                categ = categ.parent_id
        categ_ids = categ_ids.keys()

        is_product_template = products[0]._name == "product.template"
        if is_product_template:
            prod_tmpl_ids = [tmpl.id for tmpl in products]
            # all variants of all products
            prod_ids = [p.id for p in
                        list(chain.from_iterable([t.product_variant_ids for t in products]))]
        else:
            prod_ids = [product.id for product in products]
            prod_tmpl_ids = [product.product_tmpl_id.id for product in products]

        # Load all rules
        cr.execute(
            'SELECT i.id '
            'FROM product_pricelist_item AS i '
            'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
            'AND (product_id IS NULL OR (product_id = any(%s))) '
            'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
            'AND (price_version_id = %s) '
            'ORDER BY sequence, min_quantity desc',
            (prod_tmpl_ids, prod_ids, categ_ids, version.id))

        item_ids = [x[0] for x in cr.fetchall()]
        items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)

        price_types = {}  # cache of product.price.type records keyed by rule.base

        results = {}
        for product, qty, partner in products_by_qty_by_partner:
            results[product.id] = 0.0
            rule_id = False
            price = False

            # Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
            # An intermediary unit price may be computed according to a different UoM, in
            # which case the price_uom_id contains that UoM.
            # The final price will be converted to match `qty_uom_id`.
            qty_uom_id = context.get('uom') or product.uom_id.id
            price_uom_id = product.uom_id.id
            qty_in_product_uom = qty
            if qty_uom_id != product.uom_id.id:
                try:
                    qty_in_product_uom = product_uom_obj._compute_qty(
                        cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
                except except_orm:
                    # Ignored - incompatible UoM in context, use default product UoM
                    pass

            # Items are pre-sorted by (sequence, min_quantity desc): the
            # first rule whose filters all match wins.
            for rule in items:
                if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
                    continue
                if is_product_template:
                    if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
                        continue
                    if rule.product_id and \
                            (product.product_variant_count > 1 or product.product_variant_ids[0].id != rule.product_id.id):
                        # product rule acceptable on template if has only one variant
                        continue
                else:
                    if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
                        continue
                    if rule.product_id and product.id != rule.product_id.id:
                        continue

                if rule.categ_id:
                    # The rule category may match the product category or any
                    # of its ancestors.
                    cat = product.categ_id
                    while cat:
                        if cat.id == rule.categ_id.id:
                            break
                        cat = cat.parent_id
                    if not cat:
                        continue

                if rule.base == -1:
                    # Base price comes from another pricelist (recursive).
                    if rule.base_pricelist_id:
                        price_tmp = self._price_get_multi(cr, uid,
                                rule.base_pricelist_id, [(product,
                                qty, partner)], context=context)[product.id]
                        ptype_src = rule.base_pricelist_id.currency_id.id
                        price_uom_id = qty_uom_id
                        price = currency_obj.compute(cr, uid,
                                ptype_src, pricelist.currency_id.id,
                                price_tmp, round=False,
                                context=context)
                elif rule.base == -2:
                    # Base price comes from the supplier info on the product:
                    # prefer the entry for the given partner, else the first.
                    seller = False
                    for seller_id in product.seller_ids:
                        if (not partner) or (seller_id.name.id != partner):
                            continue
                        seller = seller_id
                    if not seller and product.seller_ids:
                        seller = product.seller_ids[0]
                    if seller:
                        qty_in_seller_uom = qty
                        seller_uom = seller.product_uom.id
                        if qty_uom_id != seller_uom:
                            qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
                        price_uom_id = seller_uom
                        # Last pricelist line with a satisfied min_quantity wins.
                        for line in seller.pricelist_ids:
                            if line.min_quantity <= qty_in_seller_uom:
                                price = line.price
                else:
                    # Base price read from a product field via product.price.type.
                    if rule.base not in price_types:
                        price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
                    price_type = price_types[rule.base]

                    # price_get returns the price in the context UoM, i.e. qty_uom_id
                    price_uom_id = qty_uom_id
                    price = currency_obj.compute(
                            cr, uid,
                            price_type.currency_id.id, pricelist.currency_id.id,
                            product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
                            round=False, context=context)

                if price is not False:
                    # Apply rule adjustments in order: discount, rounding,
                    # surcharge, then clamp with min/max margin over the base.
                    price_limit = price
                    price = price * (1.0+(rule.price_discount or 0.0))
                    if rule.price_round:
                        price = tools.float_round(price, precision_rounding=rule.price_round)

                    # Surcharge/margins are expressed in the product UoM and
                    # converted to the current price UoM before applying.
                    convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
                                                cr, uid, product.uom_id.id,
                                                price, price_uom_id))

                    if rule.price_surcharge:
                        price_surcharge = convert_to_price_uom(rule.price_surcharge)
                        price += price_surcharge

                    if rule.price_min_margin:
                        price_min_margin = convert_to_price_uom(rule.price_min_margin)
                        price = max(price, price_limit + price_min_margin)

                    if rule.price_max_margin:
                        price_max_margin = convert_to_price_uom(rule.price_max_margin)
                        price = min(price, price_limit + price_max_margin)
                # First matching rule always stops the search, even when it
                # produced no price (price may still be False here).
                rule_id = rule.id
                break

            # Final price conversion to target UoM
            price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
            results[product.id] = (price, rule_id)
        return results

    def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
        # Single-product wrapper: {pricelist_id: price}.
        return dict((key, price[0]) for key, price in self.price_rule_get(cr, uid, ids, prod_id, qty, partner=partner, context=context).items())

    def price_rule_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
        # Single-product wrapper: {pricelist_id: (price, rule_id)}.
        product = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
        res_multi = self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner=[(product, qty, partner)], context=context)
        res = res_multi[prod_id]
        return res
class product_pricelist_version(osv.osv):
    """A dated version of a pricelist, holding the actual rules (items).

    At most one active version of a pricelist may cover any given date;
    this is enforced by the _check_date constraint.
    """
    _name = "product.pricelist.version"
    _description = "Pricelist Version"
    _columns = {
        'pricelist_id': fields.many2one('product.pricelist', 'Price List',
            required=True, select=True, ondelete='cascade'),
        'name': fields.char('Name', required=True, translate=True),
        'active': fields.boolean('Active',
            help="When a version is duplicated it is set to non active, so that the " \
                "dates do not overlaps with original version. You should change the dates " \
                "and reactivate the pricelist"),
        'items_id': fields.one2many('product.pricelist.item',
            'price_version_id', 'Price List Items', required=True, copy=True),
        'date_start': fields.date('Start Date', help="First valid date for the version."),
        'date_end': fields.date('End Date', help="Last valid date for the version."),
        'company_id': fields.related('pricelist_id','company_id',type='many2one',
            readonly=True, relation='res.company', string='Company', store=True)
    }
    _defaults = {
        'active': lambda *a: 1,
    }

    def _check_date(self, cursor, user, ids, context=None):
        # Constraint helper: returns False when another *active* version of
        # the same pricelist has a date range overlapping this one.
        for pricelist_version in self.browse(cursor, user, ids, context=context):
            if not pricelist_version.active:
                continue
            where = []
            # NOTE(review): the dates are interpolated into the SQL string
            # instead of being passed as query parameters. They come from ORM
            # Date fields, but parameterizing them would still be safer -
            # confirm before changing.
            if pricelist_version.date_start:
                where.append("((date_end>='%s') or (date_end is null))" % (pricelist_version.date_start,))
            if pricelist_version.date_end:
                where.append("((date_start<='%s') or (date_start is null))" % (pricelist_version.date_end,))

            cursor.execute('SELECT id ' \
                    'FROM product_pricelist_version ' \
                    'WHERE '+' and '.join(where) + (where and ' and ' or '')+
                        'pricelist_id = %s ' \
                        'AND active ' \
                        'AND id <> %s', (
                            pricelist_version.pricelist_id.id,
                            pricelist_version.id))
            if cursor.fetchall():
                return False
        return True

    _constraints = [
        (_check_date, 'You cannot have 2 pricelist versions that overlap!',
            ['date_start', 'date_end'])
    ]

    def copy(self, cr, uid, id, default=None, context=None):
        # set active False to prevent overlapping active pricelist
        # versions
        if not default:
            default = {}
        default['active'] = False
        return super(product_pricelist_version, self).copy(cr, uid, id, default, context=context)
class product_pricelist_item(osv.osv):
    """One pricing rule inside a pricelist version.

    'base' selects where the starting price comes from: a product.price.type
    id (a product field), -1 for another pricelist, or -2 for the supplier
    prices declared on the product form.
    """

    def _price_field_get(self, cr, uid, context=None):
        # Selection provider for 'base': every product.price.type record,
        # plus the two pseudo-entries -1 and -2 (see class docstring).
        pt = self.pool.get('product.price.type')
        ids = pt.search(cr, uid, [], context=context)
        result = []
        for line in pt.browse(cr, uid, ids, context=context):
            result.append((line.id, line.name))

        result.append((-1, _('Other Pricelist')))
        result.append((-2, _('Supplier Prices on the product form')))
        return result

    # Added default function to fetch the Price type Based on Pricelist type.
    def _get_default_base(self, cr, uid, fields, context=None):
        # Default for 'base': the price type on 'standard_price' for purchase
        # pricelists, on 'list_price' for sale pricelists, -1 otherwise.
        product_price_type_obj = self.pool.get('product.price.type')
        if fields.get('type') == 'purchase':
            product_price_type_ids = product_price_type_obj.search(cr, uid, [('field', '=', 'standard_price')], context=context)
        elif fields.get('type') == 'sale':
            product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
        else:
            return -1
        if not product_price_type_ids:
            return False
        else:
            pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
            return pricetype.id

    _name = "product.pricelist.item"
    _description = "Pricelist item"
    _order = "sequence, min_quantity desc"
    _defaults = {
        'base': _get_default_base,
        'min_quantity': lambda *a: 0,
        'sequence': lambda *a: 5,
        'price_discount': lambda *a: 0,
    }

    def _check_recursion(self, cr, uid, ids, context=None):
        # Constraint helper: a rule may not use its own pricelist as the
        # 'Other Pricelist' base (base == -1), which would recurse forever.
        for obj_list in self.browse(cr, uid, ids, context=context):
            if obj_list.base == -1:
                main_pricelist = obj_list.price_version_id.pricelist_id.id
                other_pricelist = obj_list.base_pricelist_id.id
                if main_pricelist == other_pricelist:
                    return False
        return True

    def _check_margin(self, cr, uid, ids, context=None):
        # Constraint helper: the min margin must not exceed the max margin.
        for item in self.browse(cr, uid, ids, context=context):
            if item.price_max_margin and item.price_min_margin and (item.price_min_margin > item.price_max_margin):
                return False
        return True

    _columns = {
        'name': fields.char('Rule Name', help="Explicit rule name for this pricelist line."),
        'price_version_id': fields.many2one('product.pricelist.version', 'Price List Version', required=True, select=True, ondelete='cascade'),
        'product_tmpl_id': fields.many2one('product.template', 'Product Template', ondelete='cascade', help="Specify a template if this rule only applies to one product template. Keep empty otherwise."),
        'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Specify a product if this rule only applies to one product. Keep empty otherwise."),
        'categ_id': fields.many2one('product.category', 'Product Category', ondelete='cascade', help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise."),

        'min_quantity': fields.integer('Min. Quantity', required=True,
            help="For the rule to apply, bought/sold quantity must be greater "
                 "than or equal to the minimum quantity specified in this field.\n"
                 "Expressed in the default UoM of the product."
            ),
        'sequence': fields.integer('Sequence', required=True, help="Gives the order in which the pricelist items will be checked. The evaluation gives highest priority to lowest sequence and stops as soon as a matching item is found."),
        'base': fields.selection(_price_field_get, 'Based on', required=True, size=-1, help="Base price for computation."),
        'base_pricelist_id': fields.many2one('product.pricelist', 'Other Pricelist'),

        'price_surcharge': fields.float('Price Surcharge',
            digits_compute= dp.get_precision('Product Price'), help='Specify the fixed amount to add or substract(if negative) to the amount calculated with the discount.'),
        'price_discount': fields.float('Price Discount', digits=(16,4)),
        'price_round': fields.float('Price Rounding',
            digits_compute= dp.get_precision('Product Price'),
            help="Sets the price so that it is a multiple of this value.\n" \
                "Rounding is applied after the discount and before the surcharge.\n" \
                "To have prices that end in 9.99, set rounding 10, surcharge -0.01" \
            ),
        'price_min_margin': fields.float('Min. Price Margin',
            digits_compute= dp.get_precision('Product Price'), help='Specify the minimum amount of margin over the base price.'),
        'price_max_margin': fields.float('Max. Price Margin',
            digits_compute= dp.get_precision('Product Price'), help='Specify the maximum amount of margin over the base price.'),
        'company_id': fields.related('price_version_id','company_id',type='many2one',
            readonly=True, relation='res.company', string='Company', store=True)
    }

    _constraints = [
        (_check_recursion, 'Error! You cannot assign the Main Pricelist as Other Pricelist in PriceList Item!', ['base_pricelist_id']),
        (_check_margin, 'Error! The minimum margin should be lower than the maximum margin.', ['price_min_margin', 'price_max_margin'])
    ]

    def product_id_change(self, cr, uid, ids, product_id, context=None):
        # On-change handler: default the rule name to the product's internal
        # code when one is set.
        if not product_id:
            return {}
        prod = self.pool.get('product.product').read(cr, uid, [product_id], ['code','name'])
        if prod[0]['code']:
            return {'value': {'name': prod[0]['code']}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
neuropoly/spinalcordtoolbox | unit_testing/test_deepseg_sc.py | 1 | 3912 | #!/usr/bin/env python
# -*- coding: utf-8
# pytest unit tests for spinalcordtoolbox.deepseg_sc
import pytest
import numpy as np
import nibabel as nib
from keras import backend as K
import spinalcordtoolbox as sct
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.deepseg_sc.core
from spinalcordtoolbox.testing.create_test_data import dummy_centerline
from spinalcordtoolbox.utils import sct_test_path
# One parameter set per kernel flavour (2d / 3d): each entry pairs the
# reference segmentation file with the contrast and kernel size to test.
param_deepseg = [
    ({'fname_seg_manual':
        sct_test_path('t2', 't2_seg-deepseg_sc-2d.nii.gz'),
      'contrast': 't2', 'kernel': '2d'}),
    ({'fname_seg_manual':
        sct_test_path('t2', 't2_seg-deepseg_sc-3d.nii.gz'),
      'contrast': 't2', 'kernel': '3d'}),
]
# noinspection 801,PyShadowingNames
@pytest.mark.parametrize('params', param_deepseg)
def test_deep_segmentation_spinalcord(params):
    """High level segmentation API: segment the t2 test image with a manual
    centerline and compare against the stored reference segmentation."""
    fname_im = sct_test_path('t2', 't2.nii.gz')
    fname_centerline_manual = sct_test_path('t2', 't2_centerline-manual.nii.gz')
    # Call segmentation function
    im_seg, _, _ = sct.deepseg_sc.core.deep_segmentation_spinalcord(
        Image(fname_im), params['contrast'], ctr_algo='file', ctr_file=fname_centerline_manual, brain_bool=False,
        kernel_size=params['kernel'], threshold_seg=0.5)
    # The binary mask must come back as uint8
    assert im_seg.data.dtype == np.dtype('uint8')
    # Compare with ground-truth segmentation
    assert np.all(im_seg.data == Image(params['fname_seg_manual']).data)
def test_intensity_normalization():
    """scale_intensity() must keep the array shape and map values into [0, 255]."""
    lower_bound, upper_bound = 0, 255
    raw = np.random.rand(10, 10)
    scaled = sct.deepseg_sc.core.scale_intensity(raw, out_min=0, out_max=255)
    assert raw.shape == scaled.shape
    assert np.min(scaled) >= lower_bound
    assert np.max(scaled) <= upper_bound
def test_crop_image_around_centerline():
    """crop_image_around_centerline() should return a (crop_size, crop_size, nz)
    volume whose slices are centered on the centerline."""
    input_shape = (100, 100, 100)
    crop_size = 20
    data = np.random.rand(input_shape[0], input_shape[1], input_shape[2])
    affine = np.eye(4)
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    ctr, _, _ = dummy_centerline(size_arr=input_shape)

    _, _, _, img_out = sct.deepseg_sc.core.crop_image_around_centerline(
        im_in=img.copy(), ctr_in=ctr.copy(), crop_size=crop_size)

    # Recompute the expected crop window of the first slice by hand, using
    # the same helper the implementation uses for the start/end indices.
    img_in_z0 = img.data[:, :, 0]
    x_ctr_z0, y_ctr_z0 = np.where(ctr.data[:, :, 0])[0][0], np.where(ctr.data[:, :, 0])[1][0]
    x_start, x_end = sct.deepseg_sc.core._find_crop_start_end(x_ctr_z0, crop_size, img.dim[0])
    y_start, y_end = sct.deepseg_sc.core._find_crop_start_end(y_ctr_z0, crop_size, img.dim[1])
    img_in_z0_crop = img_in_z0[x_start:x_end, y_start:y_end]

    assert img_out.data.shape == (crop_size, crop_size, input_shape[2])
    assert np.allclose(img_in_z0_crop, img_out.data[:, :, 0])
def test_uncrop_image():
    """uncrop_image() must paste each cropped slice back at its recorded
    x/y offset inside a volume with the reference geometry."""
    input_shape = (100, 100, 100)
    crop_size = 20
    # Random binary crop volume plus random per-slice crop origins.
    data_crop = np.random.randint(0, 2, size=(crop_size, crop_size, input_shape[2]))
    data_in = np.random.randint(0, 1000, size=input_shape)
    x_crop_lst = list(np.random.randint(0, input_shape[0] - crop_size, input_shape[2]))
    y_crop_lst = list(np.random.randint(0, input_shape[1] - crop_size, input_shape[2]))
    z_crop_lst = range(input_shape[2])
    affine = np.eye(4)
    nii = nib.nifti1.Nifti1Image(data_in, affine)
    img_in = Image(data_in, hdr=nii.header, dim=nii.header.get_data_shape())

    img_uncrop = sct.deepseg_sc.core.uncrop_image(
        ref_in=img_in, data_crop=data_crop, x_crop_lst=x_crop_lst, y_crop_lst=y_crop_lst, z_crop_lst=z_crop_lst)

    # Output keeps the reference shape...
    assert img_uncrop.data.shape == input_shape
    # ...and a randomly chosen slice contains the crop at its recorded offset.
    z_rand = np.random.randint(0, input_shape[2])
    assert np.allclose(img_uncrop.data[x_crop_lst[z_rand]:x_crop_lst[z_rand] + crop_size,
                                       y_crop_lst[z_rand]:y_crop_lst[z_rand] + crop_size,
                                       z_rand],
                       data_crop[:, :, z_rand])
| mit |
net592/OneOps | dashboard/views.py | 1 | 2419 | #coding=utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from dashboard.models import *
from returner.models import *
import logging
from shaker.tasks import dashboard_task, grains_task
from shaker.check_service import CheckPort, CheckProgress
logger = logging.getLogger('django')
@login_required(login_url="/account/login/")
def index(request):
    """Render the dashboard landing page.

    Queues the background refresh tasks, loads the cached minion status
    counters, derives the OS-release distribution from the stored salt
    grains and probes the supporting services, then renders
    ``dashboard/index.html`` with that data.
    """
    import ast  # stdlib; used below for safe parsing of the stored grains

    # Kick off the asynchronous refresh of counters and grains; a dead
    # message broker must not break page rendering.
    try:
        dashboard_task.delay()
        grains_task.delay()
    except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt alive
        logger.error("Connection refused, don't connect rabbitmq service")

    # Minion status counters: [up, down, accepted, unaccepted, rejected].
    try:
        dashboard_status = Dashboard_status.objects.get(id=1)
    except Exception:  # status row not created yet (or DB error): show zeros
        status_list = [0, 0, 0, 0, 0]
    else:
        status_list = [int(dashboard_status.up),
                       int(dashboard_status.down),
                       int(dashboard_status.accepted),
                       int(dashboard_status.unaccepted),
                       int(dashboard_status.rejected),
                       ]
    logger.info(status_list)

    # Build one "<os fullname><os release>" label per minion from its grains.
    release_list = []
    for release in Salt_grains.objects.all():
        # SECURITY: grains are stored as the repr of a dict; parse them with
        # ast.literal_eval() instead of eval() so that stored data can never
        # execute arbitrary code.
        release_dic = ast.literal_eval(release.grains)
        release_info = release_dic.get('osfullname').decode('string-escape') + release_dic.get('osrelease').decode('string-escape')
        release_list.append(release_info)
    os_release = list(set(release_list))
    logger.info(os_release)

    # One {'name': ..., 'value': <count>} entry per distinct release.
    os_all = [{'name': release_name, 'value': release_list.count(release_name)}
              for release_name in os_release]
    logger.info(os_all)

    # Health checks of the supporting services shown on the dashboard.
    salt_master_status = CheckPort('Salt Master', '127.0.0.1', 4505)
    salt_api_status = CheckPort('Salt Api', '127.0.0.1', 8000)
    rabbitmq_status = CheckPort('RabbixMQ', '127.0.0.1', 5672)
    rabbitmq_mgmt_status = CheckPort('RabbixMQ Management', '127.0.0.1', 15672)
    celery_status = CheckProgress('Celery', 'celery worker')
    check_service = [salt_master_status, salt_api_status, rabbitmq_status,
                     rabbitmq_mgmt_status, celery_status]

    return render(request, 'dashboard/index.html', {'status': status_list,
                                                    'os_release': os_release,
                                                    'os_all': os_all,
                                                    'check_service': check_service,
                                                    })
| apache-2.0 |
peterm-itr/edx-platform | common/test/acceptance/tests/helpers.py | 5 | 7956 | """
Test helper functions and base classes.
"""
import json
import unittest
import functools
import requests
import os
from path import path
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import CourseLocator
def skip_if_browser(browser):
    """
    Method decorator that raises ``unittest.SkipTest`` when the test case is
    running under the named browser, and runs the test normally otherwise.

    Args:
        browser (str): name of internet browser

    Returns:
        Decorated function
    """
    def decorator(test_function):
        @functools.wraps(test_function)
        def wrapper(self, *args, **kwargs):
            if self.browser.name != browser:
                test_function(self, *args, **kwargs)
            else:
                raise unittest.SkipTest('Skipping as this test will not work with {}'.format(browser))
        return wrapper
    return decorator
def is_youtube_available():
    """
    Check that every YouTube URL the tests rely on is reachable.

    Probing stops at the first URL that cannot be reached or that answers
    with a redirect/error status, so later URLs may go unchecked.

    Returns:
        bool:
    """
    youtube_api_urls = {
        'main': 'https://www.youtube.com/',
        'player': 'http://www.youtube.com/iframe_api',
        'metadata': 'http://gdata.youtube.com/feeds/api/videos/',
        # For transcripts, you need to check an actual video, so we will
        # just specify our default video and see if that one is available.
        'transcript': 'http://video.google.com/timedtext?lang=en&v=OEoXaMPEzfM',
    }

    def _reachable(url):
        # A URL counts as available when it answers with a non-redirect,
        # non-error status (anything below 300).
        try:
            return requests.get(url, allow_redirects=False).status_code < 300
        except requests.exceptions.ConnectionError:
            return False

    return all(_reachable(url) for url in youtube_api_urls.itervalues())
def load_data_str(rel_path):
    """
    Return the contents of a file under this module's "data" directory.

    `rel_path` is the path relative to the data directory.
    """
    data_dir = path(__file__).abspath().dirname() / "data"  # pylint: disable=no-value-for-parameter
    with open(data_dir / rel_path) as data_file:
        return data_file.read()
def disable_animations(page):
    """
    Disable jQuery and CSS3 animations.

    `page` must expose a Selenium-style `browser` with `execute_script`.
    """
    disable_jquery_animations(page)
    disable_css_animations(page)
def enable_animations(page):
    """
    Enable jQuery and CSS3 animations.

    Reverses the effect of `disable_animations`.
    """
    enable_jquery_animations(page)
    enable_css_animations(page)
def disable_jquery_animations(page):
    """
    Disable jQuery animations by turning on jQuery's global fx kill switch.
    """
    page.browser.execute_script("jQuery.fx.off = true;")
def enable_jquery_animations(page):
    """
    Enable jQuery animations by turning off jQuery's global fx kill switch.
    """
    page.browser.execute_script("jQuery.fx.off = false;")
def disable_css_animations(page):
    """
    Disable CSS3 animations, transitions, transforms.

    Injects a <style> element (id 'no-transitions') that forces every
    transition/transform/animation property off; idempotent because the
    script returns early when the element already exists.
    """
    page.browser.execute_script("""
        var id = 'no-transitions';

        // if styles were already added, just do nothing.
        if (document.getElementById(id)) {
            return;
        }

        var css = [
                '* {',
                    '-webkit-transition: none !important;',
                    '-moz-transition: none !important;',
                    '-o-transition: none !important;',
                    '-ms-transition: none !important;',
                    'transition: none !important;',
                    '-webkit-transition-property: none !important;',
                    '-moz-transition-property: none !important;',
                    '-o-transition-property: none !important;',
                    '-ms-transition-property: none !important;',
                    'transition-property: none !important;',
                    '-webkit-transform: none !important;',
                    '-moz-transform: none !important;',
                    '-o-transform: none !important;',
                    '-ms-transform: none !important;',
                    'transform: none !important;',
                    '-webkit-animation: none !important;',
                    '-moz-animation: none !important;',
                    '-o-animation: none !important;',
                    '-ms-animation: none !important;',
                    'animation: none !important;',
                '}'
            ].join(''),
            head = document.head || document.getElementsByTagName('head')[0],
            styles = document.createElement('style');

        styles.id = id;
        styles.type = 'text/css';
        if (styles.styleSheet){
            styles.styleSheet.cssText = css;
        } else {
            styles.appendChild(document.createTextNode(css));
        }

        head.appendChild(styles);
    """)
def enable_css_animations(page):
    """
    Enable CSS3 animations, transitions, transforms.

    Removes the 'no-transitions' <style> element that
    `disable_css_animations` injected.
    """
    page.browser.execute_script("""
        var styles = document.getElementById('no-transitions'),
            head = document.head || document.getElementsByTagName('head')[0];

        head.removeChild(styles)
    """)
class UniqueCourseTest(WebAppTest):
    """
    Test that provides a unique course ID.
    """

    def __init__(self, *args, **kwargs):
        """
        Create a unique course ID.
        """
        super(UniqueCourseTest, self).__init__(*args, **kwargs)

    def setUp(self):
        super(UniqueCourseTest, self).setUp()
        # Per-test course identifiers; `self.unique_id` is provided by
        # WebAppTest, which keeps the number and display name unique per run.
        self.course_info = {
            'org': 'test_org',
            'number': self.unique_id,
            'run': 'test_run',
            'display_name': 'Test Course' + self.unique_id
        }

    @property
    def course_id(self):
        """
        Returns the serialized course_key for the test
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        # DEFAULT_STORE selects the module store; 'draft' (the default) maps
        # to the deprecated key format.
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        course_key = CourseLocator(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            deprecated=(default_store == 'draft')
        )
        return unicode(course_key)
class YouTubeConfigError(Exception):
    """
    Error occurred while configuring YouTube Stub Server.

    Raised by YouTubeStubConfig when the stub server answers a
    configuration request with a non-OK status.
    """
    pass
class YouTubeStubConfig(object):
    """
    Configure YouTube Stub Server.

    Thin HTTP client for the stub server's set_config/del_config/get_config
    endpoints; all methods are classmethods, no instance is needed.
    """

    # Port and base URL the stub server listens on (localhost only).
    PORT = 9080
    URL = 'http://127.0.0.1:{}/'.format(PORT)

    @classmethod
    def configure(cls, config):
        """
        Allow callers to configure the stub server using the /set_config URL.

        Arguments:
            config (dict): Configuration dictionary.

        Raises:
            YouTubeConfigError

        """
        youtube_stub_config_url = cls.URL + 'set_config'

        # Each value is JSON-encoded individually and sent as form data.
        config_data = {param: json.dumps(value) for param, value in config.items()}
        response = requests.put(youtube_stub_config_url, data=config_data)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
                    youtube_stub_config_url, config, response.status_code))

    @classmethod
    def reset(cls):
        """
        Reset YouTube Stub Server Configurations using the /del_config URL.

        Raises:
            YouTubeConfigError

        """
        youtube_stub_config_url = cls.URL + 'del_config'

        response = requests.delete(youtube_stub_config_url)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
                    youtube_stub_config_url, response.status_code))

    @classmethod
    def get_configuration(cls):
        """
        Allow callers to get current stub server configuration.

        Returns:
            dict: the current configuration, or {} when the request fails.

        """
        youtube_stub_config_url = cls.URL + 'get_config'

        response = requests.get(youtube_stub_config_url)

        if response.ok:
            return json.loads(response.content)
        else:
            return {}
| agpl-3.0 |
GiladE/birde | venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py | 571 | 2556 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------

# Interpreter version flags, derived once from sys.version_info.

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)

#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)

#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)

#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)

#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)

#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)

#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)

#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)

#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4)   # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------

# Interpreter implementation and OS flags.

# Syntax sugar.
_ver = sys.version.lower()

is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)

# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))

# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()

# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower())  # Complete guess.
# BUGFIX: this previously tested for the substring 'solar==', which can never
# occur in sys.platform; Solaris/SunOS reports 'sunos5' (see the sys.platform
# documentation).
is_solaris = ('sunos' in str(sys.platform).lower())
# Prefer simplejson when it is installed, otherwise fall back to the
# stdlib json module under the same name.
try:
    import simplejson as json
except ImportError:
    import json

# ---------
# Specifics
# ---------

# Normalize the names the rest of the package uses (URL helpers, cookie
# classes, string/number types) across Python 2 and 3.

if is_py2:
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict
    from httplib import IncompleteRead

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict
    from http.client import IncompleteRead

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
| mit |
goliate/sarakha63-persomov | libs/gntp/notifier.py | 122 | 8299 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
The gntp.notifier module is provided as a simple way to send notifications
using GNTP
.. note::
This class is intended to mostly mirror the older Python bindings such
that you should be able to replace instances of the old bindings with
this class.
`Original Python bindings <http://code.google.com/p/growl/source/browse/Bindings/python/Growl.py>`_
"""
import logging
import platform
import socket
import sys
from gntp.version import __version__
import gntp.core
import gntp.errors as errors
import gntp.shim
__all__ = [
'mini',
'GrowlNotifier',
]
logger = logging.getLogger(__name__)
class GrowlNotifier(object):
	"""Helper class to simplify sending Growl messages

	:param string applicationName: Sending application name
	:param list notification: List of valid notifications
	:param list defaultNotifications: List of notifications that should be enabled
		by default
	:param string applicationIcon: Icon URL
	:param string hostname: Remote host
	:param integer port: Remote port
	"""
	# Hash algorithm used to authenticate packets when a password is set.
	passwordHash = 'MD5'
	# Seconds before socket operations give up.
	socketTimeout = 3

	def __init__(self, applicationName='Python GNTP', notifications=[],
				defaultNotifications=None, applicationIcon=None, hostname='localhost',
				password=None, port=23053):
		# NOTE: the mutable defaults in the signature are kept for interface
		# compatibility; they are safe because both are copied with list()
		# before being stored.
		self.applicationName = applicationName
		self.notifications = list(notifications)
		if defaultNotifications:
			self.defaultNotifications = list(defaultNotifications)
		else:
			self.defaultNotifications = self.notifications
		self.applicationIcon = applicationIcon

		self.password = password
		self.hostname = hostname
		self.port = int(port)

	def _checkIcon(self, data):
		'''
		Check the icon to see if it's valid

		If it's a simple URL icon, then we return True. If it's a data icon
		then we return False
		'''
		logger.info('Checking icon')
		return gntp.shim.u(data).startswith('http')

	def register(self):
		"""Send GNTP Registration

		.. warning::
			Before sending notifications to Growl, you need to have
			sent a registration message at least once
		"""
		logger.info('Sending registration to %s:%s', self.hostname, self.port)
		register = gntp.core.GNTPRegister()
		register.add_header('Application-Name', self.applicationName)
		for notification in self.notifications:
			# Only notifications listed in defaultNotifications start enabled.
			enabled = notification in self.defaultNotifications
			register.add_notification(notification, enabled)
		if self.applicationIcon:
			if self._checkIcon(self.applicationIcon):
				register.add_header('Application-Icon', self.applicationIcon)
			else:
				# Data icons are attached as an inline GNTP resource.
				resource = register.add_resource(self.applicationIcon)
				register.add_header('Application-Icon', resource)
		if self.password:
			register.set_password(self.password, self.passwordHash)
		self.add_origin_info(register)
		self.register_hook(register)
		return self._send('register', register)

	def notify(self, noteType, title, description, icon=None, sticky=False,
			priority=None, callback=None, identifier=None, custom={}):
		"""Send a GNTP notifications

		.. warning::
			Must have registered with growl beforehand or messages will be ignored

		:param string noteType: One of the notification names registered earlier
		:param string title: Notification title (usually displayed on the notification)
		:param string description: The main content of the notification
		:param string icon: Icon URL path
		:param boolean sticky: Sticky notification
		:param integer priority: Message priority level from -2 to 2
		:param string callback: URL callback
		:param dict custom: Custom attributes. Key names should be prefixed with X-
			according to the spec but this is not enforced by this class

		.. warning::
			For now, only URL callbacks are supported. In the future, the
			callback argument will also support a function
		"""
		logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
		# noteType must have been registered or the server will drop the message.
		assert noteType in self.notifications
		notice = gntp.core.GNTPNotice()
		notice.add_header('Application-Name', self.applicationName)
		notice.add_header('Notification-Name', noteType)
		notice.add_header('Notification-Title', title)
		if self.password:
			notice.set_password(self.password, self.passwordHash)
		if sticky:
			notice.add_header('Notification-Sticky', sticky)
		if priority:
			# NOTE: a priority of 0 is the protocol default and is not sent.
			notice.add_header('Notification-Priority', priority)
		if icon:
			if self._checkIcon(icon):
				notice.add_header('Notification-Icon', icon)
			else:
				resource = notice.add_resource(icon)
				notice.add_header('Notification-Icon', resource)
		if description:
			notice.add_header('Notification-Text', description)
		if callback:
			notice.add_header('Notification-Callback-Target', callback)
		if identifier:
			notice.add_header('Notification-Coalescing-ID', identifier)
		# custom is only iterated, never mutated, so the {} default is safe.
		for key in custom:
			notice.add_header(key, custom[key])
		self.add_origin_info(notice)
		self.notify_hook(notice)
		return self._send('notify', notice)

	def subscribe(self, id, name, port):
		"""Send a Subscribe request to a remote machine"""
		sub = gntp.core.GNTPSubscribe()
		sub.add_header('Subscriber-ID', id)
		sub.add_header('Subscriber-Name', name)
		sub.add_header('Subscriber-Port', port)
		if self.password:
			sub.set_password(self.password, self.passwordHash)
		self.add_origin_info(sub)
		self.subscribe_hook(sub)
		return self._send('subscribe', sub)

	def add_origin_info(self, packet):
		"""Add optional Origin headers to message"""
		packet.add_header('Origin-Machine-Name', platform.node())
		packet.add_header('Origin-Software-Name', 'gntp.py')
		packet.add_header('Origin-Software-Version', __version__)
		packet.add_header('Origin-Platform-Name', platform.system())
		packet.add_header('Origin-Platform-Version', platform.platform())

	# The *_hook methods are extension points for subclasses to mutate a
	# packet just before it is sent; the base implementations do nothing.
	def register_hook(self, packet):
		pass

	def notify_hook(self, packet):
		pass

	def subscribe_hook(self, packet):
		pass

	def _send(self, messagetype, packet):
		"""Send the GNTP Packet

		:param string messagetype: Label used only for logging context
		:param packet: A validated gntp.core packet instance
		:return: True on a GNTP-OK response, otherwise the server's error
		:raises errors.NetworkError: on any socket failure
		"""
		packet.validate()
		data = packet.encode()

		logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)

		s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		s.settimeout(self.socketTimeout)
		try:
			s.connect((self.hostname, self.port))
			# BUGFIX: use sendall() -- plain send() may transmit only part
			# of the buffer, silently truncating the GNTP packet.
			s.sendall(data)
			recv_data = s.recv(1024)
			# GNTP responses are terminated by a blank line (CRLF CRLF).
			while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
				recv_data += s.recv(1024)
		except socket.error:
			# Python2.5 and Python3 compatible exception
			exc = sys.exc_info()[1]
			raise errors.NetworkError(exc)
		finally:
			# BUGFIX: always release the socket, even when parsing of the
			# response (below) or the recv loop raises.
			s.close()

		response = gntp.core.parse_gntp(recv_data)

		logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)

		if isinstance(response, gntp.core.GNTPOK):
			return True
		logger.error('Invalid response: %s', response.error())
		return response.error()
def mini(description, applicationName='PythonMini', noteType="Message",
		title="Mini Message", applicationIcon=None, hostname='localhost',
		password=None, port=23053, sticky=False, priority=None,
		callback=None, notificationIcon=None, identifier=None,
		notifierFactory=GrowlNotifier):
	"""Single notification function

	Simple notification function in one line. Has only one required parameter
	and attempts to use reasonable defaults for everything else

	:param string description: Notification message

	.. warning::
		For now, only URL callbacks are supported. In the future, the
		callback argument will also support a function
	"""
	settings = dict(
		applicationName=applicationName,
		notifications=[noteType],
		defaultNotifications=[noteType],
		applicationIcon=applicationIcon,
		hostname=hostname,
		password=password,
		port=port,
	)
	try:
		notifier = notifierFactory(**settings)
		registered = notifier.register()
		if registered is not True:
			# Registration failed; hand the server's error back to the caller.
			return registered
		return notifier.notify(
			noteType=noteType,
			title=title,
			description=description,
			icon=notificationIcon,
			sticky=sticky,
			priority=priority,
			callback=callback,
			identifier=identifier,
		)
	except Exception:
		# Deliberately swallow everything: "mini" is fire-and-forget and
		# should never break the calling application.
		logger.exception("Growl error")
if __name__ == '__main__':
	# If we're running this module directly we're likely running it as a test,
	# so enable extra logging and fire one smoke-test notification at the
	# local Growl/GNTP server.
	logging.basicConfig(level=logging.INFO)
	mini('Testing mini notification')
| gpl-3.0 |
elventear/ansible | lib/ansible/plugins/callback/minimal.py | 10 | 3874 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):

    '''
    Default stdout callback plugin: prints a one-line summary to stdout for
    every runner event it receives.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'minimal'

    def _command_generic_msg(self, host, result, caption):
        ''' output the result of a command run '''
        rc = result.get('rc', -1)
        out = result.get('stdout', '')
        err = result.get('stderr', '')
        msg = result.get('msg', '')
        header = "%s | %s | rc=%s >>\n" % (host, caption, rc)
        return header + out + err + msg + "\n"

    def v2_runner_on_failed(self, result, ignore_errors=False):
        if 'exception' in result._result:
            tb = result._result['exception']
            if self._display.verbosity < 3:
                # Without -vvv, show only the final line of the traceback.
                error = tb.strip().split('\n')[-1]
                text = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
            else:
                text = "An exception occurred during task execution. The full traceback is:\n" + tb

            self._display.display(text, color=C.COLOR_ERROR)

        self._handle_warnings(result._result)

        hostname = result._host.get_name()
        if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
            # Raw/command-style modules get the plain-text rendering.
            self._display.display(self._command_generic_msg(hostname, result._result, "FAILED"), color=C.COLOR_ERROR)
        else:
            self._display.display("%s | FAILED! => %s" % (hostname, self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)

    def v2_runner_on_ok(self, result):
        self._clean_results(result._result, result._task.action)
        self._handle_warnings(result._result)

        hostname = result._host.get_name()
        if result._task.action in C.MODULE_NO_JSON:
            self._display.display(self._command_generic_msg(hostname, result._result, "SUCCESS"), color=C.COLOR_OK)
            return

        # Yellow for changed tasks, green otherwise.
        color = C.COLOR_CHANGED if result._result.get('changed', False) else C.COLOR_OK
        self._display.display("%s | SUCCESS => %s" % (hostname, self._dump_results(result._result, indent=4)), color=color)

    def v2_runner_on_skipped(self, result):
        self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)

    def v2_runner_on_unreachable(self, result):
        self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)

    def v2_on_file_diff(self, result):
        diff = result._result.get('diff')
        if diff:
            self._display.display(self._get_diff(diff))
| gpl-3.0 |
2015fallproject/2015fallcase1 | static/Brython3.2.0-20150701-214155/Lib/encodings/ptcp154.py | 34 | 14327 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless charmap codec: delegates to the C-level charmap helpers
    # using the module-level encoding/decoding tables built below.
    # (Generated by gencodec.py -- keep code in sync with the generator.)

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding carries no state between calls, so each chunk is
    # encoded independently; ``final`` is ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charset: every byte decodes on its own, so no state is
    # kept between chunks; ``final`` is ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from Codec.encode + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from Codec.decode + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    # Registration hook called by the ``encodings`` package machinery to
    # obtain this codec's entry points.
    return codecs.CodecInfo(
        name='ptcp154',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE (DEL)
'\u0496' # 0x80 -> CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
'\u0492' # 0x81 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE
'\u04ee' # 0x82 -> CYRILLIC CAPITAL LETTER U WITH MACRON
'\u0493' # 0x83 -> CYRILLIC SMALL LETTER GHE WITH STROKE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u04b6' # 0x86 -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
'\u04ae' # 0x87 -> CYRILLIC CAPITAL LETTER STRAIGHT U
'\u04b2' # 0x88 -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER
'\u04af' # 0x89 -> CYRILLIC SMALL LETTER STRAIGHT U
'\u04a0' # 0x8A -> CYRILLIC CAPITAL LETTER BASHKIR KA
'\u04e2' # 0x8B -> CYRILLIC CAPITAL LETTER I WITH MACRON
'\u04a2' # 0x8C -> CYRILLIC CAPITAL LETTER EN WITH DESCENDER
'\u049a' # 0x8D -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER
'\u04ba' # 0x8E -> CYRILLIC CAPITAL LETTER SHHA
'\u04b8' # 0x8F -> CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
'\u0497' # 0x90 -> CYRILLIC SMALL LETTER ZHE WITH DESCENDER
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u04b3' # 0x98 -> CYRILLIC SMALL LETTER HA WITH DESCENDER
'\u04b7' # 0x99 -> CYRILLIC SMALL LETTER CHE WITH DESCENDER
'\u04a1' # 0x9A -> CYRILLIC SMALL LETTER BASHKIR KA
'\u04e3' # 0x9B -> CYRILLIC SMALL LETTER I WITH MACRON
'\u04a3' # 0x9C -> CYRILLIC SMALL LETTER EN WITH DESCENDER
'\u049b' # 0x9D -> CYRILLIC SMALL LETTER KA WITH DESCENDER
'\u04bb' # 0x9E -> CYRILLIC SMALL LETTER SHHA
'\u04b9' # 0x9F -> CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U (Byelorussian)
'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
'\u04e8' # 0xA4 -> CYRILLIC CAPITAL LETTER BARRED O
'\u0498' # 0xA5 -> CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
'\u04b0' # 0xA6 -> CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
'\xa7' # 0xA7 -> SECTION SIGN
'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u04d8' # 0xAA -> CYRILLIC CAPITAL LETTER SCHWA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\u04ef' # 0xAD -> CYRILLIC SMALL LETTER U WITH MACRON
'\xae' # 0xAE -> REGISTERED SIGN
'\u049c' # 0xAF -> CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u04b1' # 0xB1 -> CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0499' # 0xB4 -> CYRILLIC SMALL LETTER ZE WITH DESCENDER
'\u04e9' # 0xB5 -> CYRILLIC SMALL LETTER BARRED O
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
'\u2116' # 0xB9 -> NUMERO SIGN
'\u04d9' # 0xBA -> CYRILLIC SMALL LETTER SCHWA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
'\u04aa' # 0xBD -> CYRILLIC CAPITAL LETTER ES WITH DESCENDER
'\u04ab' # 0xBE -> CYRILLIC SMALL LETTER ES WITH DESCENDER
'\u049d' # 0xBF -> CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
# Inverse mapping (character -> byte) derived from decoding_table once at
# import time by the C helper.
encoding_table=codecs.charmap_build(decoding_table)
| agpl-3.0 |
alshedivat/tensorflow | tensorflow/compiler/tests/oom_test.py | 7 | 2989 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for out-of-memory conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class OutOfMemoryTest(xla_test.XLATestCase):
  # Verifies that device OOM surfaces as a catchable ResourceExhaustedError
  # rather than a crash when an XLA computation's output is too large.

  def testOutputOutOfMemory(self):
    """Allocates tensors until out of memory.

    Generates a large rank-1 tensor. The tensor is an output of an XLA
    computation, not constant.

    Check that a ResourceExhaustedError is raised and can be caught.

    We spin in a loop generating larger and larger tensors until an OOM event
    happens. We may be running sandboxed, so have a small host memory limit, so
    any hardcoded value is unlikely to land in the sweet spot between device
    memory size and host memory size with stability.
    """

    def test_loop():
      # Start at ~2e8 float32 elements and double each iteration until the
      # device runs out of memory.
      size = int(2e8)
      while True:
        with self.cached_session():
          # Force the compiled code to not be constant by feeding in a
          # parameter.
          p = array_ops.placeholder(dtypes.float32, shape=[2, 1, 1])
          with self.test_scope():
            # Create a computation that produces a large R1 tensor as an
            # intermediate result.  Reduce it down so that if this file was
            # compiled without --config=cuda, we don't force a D2H copy of a
            # large tensor and potentially OOM the host.
            #
            # This is a bit tricky because XLA:GPU doesn't currently support RNG
            # ops.  Here we rely on the fact that XLA doesn't do algebraic
            # simplifications on conv(<ones>, <filter>).
            c = math_ops.reduce_sum(
                nn_ops.convolution(
                    array_ops.ones([1, size, 1]),
                    p,
                    padding='SAME',
                    data_format='NWC'))

            c.eval(feed_dict={p: [[[1.0]], [[2.0]]]})
        # Geometric growth guarantees we eventually exceed device memory.
        size *= 2

    # The loop never exits normally; it must terminate via OOM.
    self.assertRaises(errors.ResourceExhaustedError, test_loop)
if __name__ == '__main__':
  # Run this test file under the TensorFlow test runner.
  googletest.main()
| apache-2.0 |
CKiilu/scurrae | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/img.py | 268 | 18059 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
_winreg = None
# Public formatter classes exported by this module.
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
           'BmpImageFormatter']


# For some unknown reason every font calls it something different: maps our
# canonical style names to the name suffixes various font families use.
STYLES = {
    'NORMAL':     ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC':     ['Oblique', 'Italic'],
    'BOLD':       ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}

# A sane default for modern systems, per platform.
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
    """When Python imaging library is not available"""
class FontNotFound(Exception):
    """When there are no usable fonts specified"""
class FontManager(object):
    """
    Manages a set of fonts: normal, italic, bold, etc...

    Font files are discovered via fontconfig's ``fc-list`` on Unix-like
    systems and via the font registry keys on Windows, then loaded with
    PIL's ``ImageFont.truetype``.
    """

    def __init__(self, font_name, font_size=14):
        # font_name: family name to search for; falls back to a
        # platform-specific default when empty.
        self.font_name = font_name
        self.font_size = font_size
        self.fonts = {}        # style name ('NORMAL', 'BOLD', ...) -> ImageFont
        self.encoding = None
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()

    def _get_nix_font_path(self, name, style):
        # Ask fontconfig for the file of the given family/style; returns the
        # first match, or None when the command fails or finds nothing.
        # NOTE: uses the Python-2-only `commands` module.
        from commands import getstatusoutput
        exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
                                    (name, style))
        if not exit:
            lines = out.splitlines()
            if lines:
                # fc-list output ends each path with a trailing colon.
                path = lines[0].strip().strip(':')
                return path

    def _create_nix(self):
        # The NORMAL face is mandatory; try every known alias for it.
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        # Other styles fall back to BOLD (for BOLDITALIC) or NORMAL.
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _lookup_win(self, key, basename, styles, fail=False):
        # Probe the registry for '<base> <style>' with and without the
        # ' (TrueType)' suffix; the for-else runs only when nothing matched.
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except EnvironmentError:
                    continue
        else:
            if fail:
                raise FontNotFound('Font %s (%s) not found in registry' %
                                   (basename, styles[0]))
            return None

    def _create_win(self):
        # Prefer the NT font registry key, fall back to the 9x-era one.
        try:
            key = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
        except EnvironmentError:
            try:
                key = _winreg.OpenKey(
                    _winreg.HKEY_LOCAL_MACHINE,
                    r'Software\Microsoft\Windows\CurrentVersion\Fonts')
            except EnvironmentError:
                raise FontNotFound('Can\'t open Windows font registry key')
        try:
            # NORMAL must exist (fail=True); other styles degrade gracefully.
            path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
            self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
            for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                path = self._lookup_win(key, self.font_name, STYLES[style])
                if path:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                else:
                    if style == 'BOLDITALIC':
                        self.fonts[style] = self.fonts['BOLD']
                    else:
                        self.fonts[style] = self.fonts['NORMAL']
        finally:
            _winreg.CloseKey(key)

    def get_char_size(self):
        """
        Get the (width, height) in pixels of a single character, measured
        on 'M' in the normal face (the font is assumed monospaced).
        """
        return self.fonts['NORMAL'].getsize('M')

    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
    def __init__(self, **options):
        """
        See the class docstring for explanation of options.

        :raises PilNotAvailable: if PIL could not be imported at module load.
        :raises FontNotFound: (via FontManager) if no usable font exists.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts: fontw/fonth are the fixed cell size of one character.
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                        'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                        'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                        'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                        'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        # Total pixel width reserved for the line-number margin (0 if off).
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                # Non-integer entries in hl_lines are silently ignored.
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        # Deferred draw commands accumulated by _draw_text and painted later.
        self.drawables = []
def get_style_defs(self, arg=''):
    """Unsupported for pixmap output; styles are baked into the image."""
    message = ('The -S option is meaningless for the image '
               'formatter. Use -O style=<stylename> instead.')
    raise NotImplementedError(message)
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
    """
    Create drawables for the token content.

    Walks the ``(tokentype, value)`` pairs, splits each value into lines
    and queues one text drawable per non-empty line, while tracking the
    furthest column/line reached in ``self.maxcharno``/``self.maxlineno``.
    """
    lineno = charno = maxcharno = 0
    for ttype, value in tokensource:
        # Fall back to the nearest ancestor token type that has a style.
        while ttype not in self.styles:
            ttype = ttype.parent
        style = self.styles[ttype]
        # TODO: make sure tab expansion happens earlier in the chain.  It
        # really ought to be done on the input, as to do it right here is
        # quite complex.
        value = value.expandtabs(4)
        # keepends=True so trailing newlines below tell us to advance lineno.
        lines = value.splitlines(True)
        for i, line in enumerate(lines):
            temp = line.rstrip('\n')
            if temp:
                self._draw_text(
                    self._get_text_pos(charno, lineno),
                    temp,
                    font = self._get_style_font(style),
                    fill = self._get_text_color(style)
                )
                charno += len(temp)
                maxcharno = max(maxcharno, charno)
            if line.endswith('\n'):
                # add a line for each extra line in the value
                charno = 0
                lineno += 1
    self.maxcharno = maxcharno
    self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
    """Fill the line-number gutter background and draw its right edge on *im*."""
    if not self.line_numbers or self.line_number_fg is None:
        return
    draw = ImageDraw.Draw(im)
    gutter_right = self.image_pad + self.line_number_width - self.line_number_pad
    image_bottom = im.size[-1]
    draw.rectangle([(0, 0), (gutter_right, image_bottom)],
                   fill=self.line_number_bg)
    draw.line([(gutter_right, 0), (gutter_right, image_bottom)],
              fill=self.line_number_fg)
    del draw
def format(self, tokensource, outfile):
    """
    Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
    tuples and write it into ``outfile``.

    This implementation calculates where it should draw each token on the
    pixmap, then calculates the required pixmap size and draws the items.
    """
    # First pass: lay out all text (and line numbers) as queued drawables;
    # this also determines maxcharno/maxlineno used to size the canvas.
    self._create_drawables(tokensource)
    self._draw_line_numbers()
    im = Image.new(
        'RGB',
        self._get_image_size(self.maxcharno, self.maxlineno),
        self.background_color
    )
    # Backgrounds go in before text: gutter first, then highlight bars.
    self._paint_line_number_bg(im)
    draw = ImageDraw.Draw(im)
    # Highlight
    if self.hl_lines:
        x = self.image_pad + self.line_number_width - self.line_number_pad + 1
        recth = self._get_line_height()
        rectw = im.size[0] - x
        for linenumber in self.hl_lines:
            # hl_lines is 1-based; drawing coordinates are 0-based.
            y = self._get_line_y(linenumber - 1)
            draw.rectangle([(x, y), (x + rectw, y + recth)],
                           fill=self.hl_color)
    # Second pass: paint the queued text on top of the backgrounds.
    for pos, value, font, kw in self.drawables:
        draw.text(pos, value, font=font, **kw)
    im.save(outfile, self.image_format.upper())
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
    """
    An `ImageFormatter` preconfigured to emit GIF pixmaps.

    *New in Pygments 1.0.* (Previously the same result required passing a
    suitable `image_format` option to the `ImageFormatter` explicitly.)
    """

    default_image_format = 'gif'
    name = 'img_gif'
    aliases = ['gif']
    filenames = ['*.gif']
class JpgImageFormatter(ImageFormatter):
    """
    An `ImageFormatter` preconfigured to emit JPEG pixmaps.

    *New in Pygments 1.0.* (Previously the same result required passing a
    suitable `image_format` option to the `ImageFormatter` explicitly.)
    """

    default_image_format = 'jpeg'
    name = 'img_jpg'
    aliases = ['jpg', 'jpeg']
    filenames = ['*.jpg']
class BmpImageFormatter(ImageFormatter):
    """
    An `ImageFormatter` preconfigured to emit bitmap (BMP) pixmaps.

    *New in Pygments 1.0.* (Previously the same result required passing a
    suitable `image_format` option to the `ImageFormatter` explicitly.)
    """

    default_image_format = 'bmp'
    name = 'img_bmp'
    aliases = ['bmp', 'bitmap']
    filenames = ['*.bmp']
| mit |
SusanJL/iris | lib/iris/tests/unit/analysis/scipy_interpolate/test__RegularGridInterpolator.py | 16 | 3938 | # (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the
:func:`iris.analysis._scipy_interpolate._RegularGridInterpolator` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris.analysis._scipy_interpolate import _RegularGridInterpolator
from scipy.sparse.csr import csr_matrix
from iris.tests import mock
import iris.tests.stock as stock
class Test(tests.IrisTest):
    # Exercises _RegularGridInterpolator's split weight-computation /
    # weight-application API against a hand-computed bilinear expectation.

    def setUp(self):
        # Load a source cube, then generate an interpolator instance, calculate
        # the interpolation weights and set up a target grid.
        self.cube = stock.simple_2d()
        x_points = self.cube.coord('bar').points
        y_points = self.cube.coord('foo').points
        self.interpolator = _RegularGridInterpolator([x_points, y_points],
                                                     self.cube.data,
                                                     method='linear',
                                                     bounds_error=False,
                                                     fill_value=None)
        newx = x_points + 0.7
        newy = y_points + 0.7

        # Hand-compute the expected increment at (px_0 + 0.7, py_0 + 0.7)
        # via two successive 1-D linear interpolations over the first cell.
        d_0 = self.cube.data[0, 0]
        d_1 = self.cube.data[0, 1]
        d_2 = self.cube.data[1, 0]
        d_3 = self.cube.data[1, 1]
        px_0, px_1 = x_points[0], x_points[1]
        py_0, py_1 = y_points[0], y_points[1]
        px_t = px_0 + 0.7
        py_t = py_0 + 0.7

        dyt_0 = self._interpolate_point(py_t, py_0, py_1, d_0, d_1)
        dyt_1 = self._interpolate_point(py_t, py_0, py_1, d_2, d_3)
        self.test_increment = self._interpolate_point(px_t, px_0, px_1,
                                                      dyt_0, dyt_1)

        # Target grid of stacked (y, x) sample points, shifted 0.7 into
        # each grid cell, and its pre-computed interpolation weights.
        xv, yv = np.meshgrid(newy, newx)
        self.tgrid = np.dstack((yv, xv))
        self.weights = self.interpolator.compute_interp_weights(self.tgrid)

    @staticmethod
    def _interpolate_point(p_t, p_0, p_1, d_0, d_1):
        # Standard 1-D linear interpolation between (p_0, d_0) and (p_1, d_1).
        return d_0 + (d_1 - d_0)*((p_t - p_0)/(p_1 - p_0))

    def test_compute_interp_weights(self):
        # Weights are an opaque 5-tuple: (target shape, method name,
        # sparse weight matrix, ...).
        weights = self.weights
        self.assertIsInstance(weights, tuple)
        self.assertEqual(len(weights), 5)
        self.assertEqual(weights[0], self.tgrid.shape)
        self.assertEqual(weights[1], 'linear')
        self.assertIsInstance(weights[2], csr_matrix)

    def test__evaluate_linear_sparse(self):
        # Applying just the sparse matrix yields the flattened result.
        interpolator = self.interpolator
        weights = self.weights
        output_data = interpolator._evaluate_linear_sparse(weights[2])
        test_data = self.cube.data.reshape(-1) + self.test_increment
        self.assertArrayAlmostEqual(output_data, test_data)

    def test_interp_using_pre_computed_weights(self):
        # The public entry point restores the original grid shape.
        interpolator = self.interpolator
        weights = self.weights
        output_data = interpolator.interp_using_pre_computed_weights(weights)
        test_data = self.cube.data + self.test_increment
        self.assertEqual(output_data.shape, self.cube.data.shape)
        self.assertArrayAlmostEqual(output_data, test_data)
# Allow running this test module directly (rather than via a test runner).
if __name__ == "__main__":
    tests.main()
| gpl-3.0 |
benjaminbrinkman/open-ad-platform | .venv/lib/python3.4/site-packages/pbr/options.py | 99 | 2371 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
# Lower-cased strings understood to mean boolean true.
TRUE_VALUES = ('true', '1', 'yes')


def get_boolean_option(option_dict, option_name, env_name):
    """Return True when the option, or its environment fallback, is truthy.

    *option_dict* maps option names to ``(source, value)`` pairs; when the
    option is present and its value (case-insensitively) matches
    ``TRUE_VALUES`` the answer is True, otherwise the *env_name*
    environment variable is consulted the same way.
    """
    if option_name in option_dict:
        if option_dict[option_name][1].lower() in TRUE_VALUES:
            return True
    return str(os.getenv(env_name)).lower() in TRUE_VALUES
| mit |
unseenlaser/python-for-android | python-modules/twisted/twisted/python/dist.py | 60 | 12329 | """
Distutils convenience functionality.
Don't use this outside of Twisted.
Maintainer: Christopher Armstrong
"""
import sys, os
from distutils.command import build_scripts, install_data, build_ext, build_py
from distutils.errors import CompileError
from distutils import core
from distutils.core import Extension
# Names of the Twisted subprojects that can be packaged independently;
# used by get_setup_args() when 'twisted_subproject' is passed.
twisted_subprojects = ["conch", "lore", "mail", "names",
                       "news", "pair", "runner", "web", "web2",
                       "words", "vfs"]
class ConditionalExtension(Extension):
    """
    An extension module that will only be compiled if certain conditions are
    met.

    @param condition: A callable of one argument which returns True or False to
        indicate whether the extension should be built. The argument is an
        instance of L{build_ext_twisted}, which has useful methods for checking
        things about the platform.
    """
    def __init__(self, *args, **kwargs):
        # Pop our extra keyword before delegating, since the base Extension
        # constructor rejects keyword arguments it does not know about.
        self.condition = kwargs.pop("condition", lambda builder: True)
        Extension.__init__(self, *args, **kwargs)
def setup(**kw):
    """
    An alternative to distutils' setup() tailored to Twisted subprojects.

    Pass twisted_subproject=projname to have package and data files
    located automatically.

    @param conditionalExtensions: Extensions to optionally build.
    @type conditionalExtensions: C{list} of L{ConditionalExtension}
    """
    normalized = get_setup_args(**kw)
    return core.setup(**normalized)
def get_setup_args(**kw):
    """
    Normalise keyword arguments for distutils' setup().

    Expands the Twisted-specific convenience keywords
    ('twisted_subproject', 'plugins', 'conditionalExtensions') into their
    standard distutils equivalents and installs Twisted's custom commands.
    """
    if 'twisted_subproject' in kw:
        # Subproject builds must run from the toplevel source directory so
        # the relative 'twisted/<proj>' paths below resolve.
        if 'twisted' not in os.listdir('.'):
            raise RuntimeError("Sorry, you need to run setup.py from the "
                               "toplevel source directory.")
        projname = kw['twisted_subproject']
        projdir = os.path.join('twisted', projname)

        kw['packages'] = getPackages(projdir, parent='twisted')
        kw['version'] = getVersion(projname)

        plugin = "twisted/plugins/twisted_" + projname + ".py"

        if os.path.exists(plugin):
            # Register the dropin as a py_module ('.py' stripped).
            kw.setdefault('py_modules', []).append(
                plugin.replace("/", ".")[:-3])
        kw['data_files'] = getDataFiles(projdir, parent='twisted')

        del kw['twisted_subproject']
    else:
        if 'plugins' in kw:
            py_modules = []
            for plg in kw['plugins']:
                py_modules.append("twisted.plugins." + plg)
            kw.setdefault('py_modules', []).extend(py_modules)
            del kw['plugins']

    if 'cmdclass' not in kw:
        kw['cmdclass'] = {
            'install_data': install_data_twisted,
            'build_scripts': build_scripts_twisted}
        if sys.version_info[:3] < (2, 3, 0):
            kw['cmdclass']['build_py'] = build_py_twisted

    if "conditionalExtensions" in kw:
        extensions = kw["conditionalExtensions"]
        del kw["conditionalExtensions"]

        if 'ext_modules' not in kw:
            # This is a workaround for distutils behavior; ext_modules isn't
            # actually used by our custom builder.  distutils deep-down checks
            # to see if there are any ext_modules defined before invoking
            # the build_ext command.  We need to trigger build_ext regardless
            # because it is the thing that does the conditional checks to see
            # if it should build any extensions.  The reason we have to delay
            # the conditional checks until then is that the compiler objects
            # are not yet set up when this code is executed.
            kw["ext_modules"] = extensions

        class my_build_ext(build_ext_twisted):
            conditionalExtensions = extensions
        kw.setdefault('cmdclass', {})['build_ext'] = my_build_ext
    return kw
def getVersion(proj, base="twisted"):
    """
    Extract the version number for a given project.

    @param proj: the name of the project. Examples are "core",
        "conch", "words", "mail".
    @param base: directory containing the package tree (defaults to the
        in-tree "twisted" package).

    @rtype: str
    @returns: The version number of the project, as a string like
        "2.0.0".
    """
    if proj == 'core':
        vfile = os.path.join(base, '_version.py')
    else:
        vfile = os.path.join(base, proj, '_version.py')
    ns = {'__name__': 'Nothing to see here'}
    # execfile() only exists on Python 2; exec+compile behaves identically
    # there and also works on Python 3.
    with open(vfile) as f:
        code = compile(f.read(), vfile, 'exec')
    exec(code, ns)
    return ns['version'].base()
# Names that are excluded from globbing results: version-control and
# build-system metadata directories/files.
EXCLUDE_NAMES = ["{arch}", "CVS", ".cvsignore", "_darcs",
                 "RCS", "SCCS", ".svn"]
# Glob patterns never shipped as data files: compiled objects, shared
# libraries, editor backups, and Python sources themselves.
EXCLUDE_PATTERNS = ["*.py[cdo]", "*.s[ol]", ".#*", "*~", "*.py"]

import fnmatch
def _filterNames(names):
    """Return the subset of *names* that should be copied as data files.

    VCS metadata, editor backups, compiled objects and Python sources are
    all dropped.
    """
    kept = [name for name in names if name not in EXCLUDE_NAMES]
    # This is needed when building a distro from a working
    # copy (likely a checkout) rather than a pristine export:
    for pattern in EXCLUDE_PATTERNS:
        kept = [name for name in kept
                if not fnmatch.fnmatch(name, pattern)
                and not name.endswith('.py')]
    return kept
def relativeTo(base, relativee):
    """
    Return 'relativee' expressed relative to 'base', rejoined onto the
    original (possibly relative) 'base' spelling.

    'relativee' must be a child of 'base', otherwise ValueError is raised.
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    if not relativee.startswith(basepath):
        raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
    remainder = relativee[len(basepath):]
    if remainder.startswith(os.sep):
        remainder = remainder[1:]
    return os.path.join(base, remainder)
def getDataFiles(dname, ignore=None, parent=None):
    """
    Get all the data files that should be included in this distutils Project.

    'dname' should be the path to the package that you're distributing.

    'ignore' is a list of sub-packages to ignore.  This facilitates
    disparate package hierarchies.  That's a fancy way of saying that
    the 'twisted' package doesn't want to include the 'twisted.conch'
    package, so it will pass ['conch'] as the value.

    'parent' is necessary if you're distributing a subpackage like
    twisted.conch.  'dname' should point to 'twisted/conch' and 'parent'
    should point to 'twisted'.  This ensures that your data_files are
    generated correctly, only using relative paths for the first element
    of the tuple ('twisted/conch/*').  The default 'parent' is the
    current working directory.
    """
    parent = parent or "."
    ignore = ignore or []
    result = []
    for directory, subdirectories, filenames in os.walk(dname):
        resultfiles = []
        # Prune VCS/metadata directories in place so os.walk skips them.
        for exname in EXCLUDE_NAMES:
            if exname in subdirectories:
                subdirectories.remove(exname)
        # Likewise prune any caller-ignored sub-packages.
        for ig in ignore:
            if ig in subdirectories:
                subdirectories.remove(ig)
        for filename in _filterNames(filenames):
            resultfiles.append(filename)
        if resultfiles:
            result.append((relativeTo(parent, directory),
                           [relativeTo(parent,
                                       os.path.join(directory, filename))
                            for filename in resultfiles]))
    return result
def getPackages(dname, pkgname=None, results=None, ignore=None, parent=None):
    """
    Recursively discover the Python packages under *dname*, returning
    dotted names.  This is necessary for Python 2.2's distutils.  Pretty
    similar arguments to getDataFiles, including 'parent'.
    """
    parent = parent or ""
    ignore = ignore or []
    bname = os.path.basename(dname)
    if bname in ignore:
        return []
    if results is None:
        results = []
    if pkgname is None:
        pkgname = []
    prefix = [parent] if parent else []
    entries = os.listdir(dname)
    paths = [os.path.join(dname, entry) for entry in entries]
    if '__init__.py' in entries:
        results.append(prefix + pkgname + [bname])
    for subdir in filter(os.path.isdir, paths):
        getPackages(subdir, pkgname=pkgname + [bname],
                    results=results, ignore=ignore,
                    parent=parent)
    return ['.'.join(parts) for parts in results]
def getScripts(projname, basedir=''):
    """
    Returns the executable scripts for a Twisted subproject; works in an
    SVN checkout as well as a project-specific tarball.
    """
    scriptdir = os.path.join(basedir, 'bin', projname)
    if not os.path.isdir(scriptdir):
        # Probably a project-specific tarball, in which case only this
        # project's bins are included in 'bin'
        scriptdir = os.path.join(basedir, 'bin')
        if not os.path.isdir(scriptdir):
            return []
    entries = os.listdir(scriptdir)
    if '.svn' in entries:
        entries.remove('.svn')
    return filter(os.path.isfile,
                  [os.path.join(scriptdir, entry) for entry in entries])
## Helpers and distutil tweaks
class build_py_twisted(build_py.build_py):
    """
    Changes behavior in Python 2.2 to support simultaneous specification of
    `packages' and `py_modules'.
    """
    def run(self):
        # Unlike the stock 2.2 command, handle modules AND packages in a
        # single run, then byte-compile everything that was copied.
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
        self.byte_compile(self.get_outputs(include_bytecode=0))
class build_scripts_twisted(build_scripts.build_scripts):
    """Renames scripts so they end with '.py' on Windows."""

    def run(self):
        """Build the scripts, then append '.py' to each one on Windows."""
        build_scripts.build_scripts.run(self)
        if not os.name == "nt":
            return
        for f in os.listdir(self.build_dir):
            fpath = os.path.join(self.build_dir, f)
            if not fpath.endswith(".py"):
                try:
                    # Remove any stale '<name>.py' from a previous build so
                    # the rename below cannot fail with "file exists".
                    os.unlink(fpath + ".py")
                except EnvironmentError as e:
                    # 'except EnvironmentError, e:' was Python 2-only syntax
                    # and made this whole module a SyntaxError on Python 3;
                    # 'as' is accepted by Python 2.6+ and 3. A missing file
                    # is the expected (ignorable) case here.
                    if e.args[1] == 'No such file or directory':
                        pass
                os.rename(fpath, fpath + ".py")
class install_data_twisted(install_data.install_data):
    """I make sure data files are installed in the package directory."""
    def finalize_options(self):
        # Install data files alongside the code (install_lib) instead of
        # under the default data prefix.
        self.set_undefined_options('install',
            ('install_lib', 'install_dir')
        )
        install_data.install_data.finalize_options(self)
class build_ext_twisted(build_ext.build_ext):
    """
    Allow subclasses to easily detect and customize Extensions to
    build at install-time.
    """

    def prepare_extensions(self):
        """
        Prepare the C{self.extensions} attribute (used by
        L{build_ext.build_ext}) by checking which extensions in
        L{conditionalExtensions} should be built.  In addition, if we are
        building on NT, define the WIN32 macro to 1.
        """
        # always define WIN32 under Windows
        if os.name == 'nt':
            self.define_macros = [("WIN32", 1)]
        else:
            self.define_macros = []
        # Each ConditionalExtension decides for itself whether to build,
        # given this builder (which exposes _check_header & friends).
        self.extensions = [x for x in self.conditionalExtensions
                           if x.condition(self)]
        for ext in self.extensions:
            ext.define_macros.extend(self.define_macros)

    def build_extensions(self):
        """
        Check to see which extension modules to build and then build them.
        """
        self.prepare_extensions()
        build_ext.build_ext.build_extensions(self)

    def _remove_conftest(self):
        # Best-effort cleanup of the probe artifacts left by
        # _compile_helper; missing files are fine.
        for filename in ("conftest.c", "conftest.o", "conftest.obj"):
            try:
                os.unlink(filename)
            except EnvironmentError:
                pass

    def _compile_helper(self, content):
        # Write *content* to conftest.c and report whether it compiles
        # with the configured compiler.
        conftest = open("conftest.c", "w")
        try:
            conftest.write(content)
            conftest.close()

            try:
                self.compiler.compile(["conftest.c"], output_dir='')
            except CompileError:
                return False
            return True
        finally:
            self._remove_conftest()

    def _check_header(self, header_name):
        """
        Check if the given header can be included by trying to compile a file
        that contains only an #include line.
        """
        self.compiler.announce("checking for %s ..." % header_name, 0)
        return self._compile_helper("#include <%s>\n" % header_name)
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/django-ipware-2.1.0/ipware/tests/tests_v1/tests_ipv4.py | 2 | 14544 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpRequest
from django.test import TestCase
from ipware.ip import get_ip
from ipware.ip import get_real_ip
from ipware.ip import get_trusted_ip
class IPv4TestCase(TestCase):
    """IP address Test"""
    # Conventions exercised below: X_FORWARDED_FOR lists are scanned
    # left-to-right (unless right_most_proxy=True) for the first public
    # address; private/loopback/'unknown' entries are skipped and the
    # lookup falls back to X_REAL_IP, then REMOTE_ADDR.

    def test_meta_none(self):
        request = HttpRequest()
        request.META = {
        }
        ip = get_real_ip(request)
        self.assertIsNone(ip)

    def test_http_x_forwarded_for_multiple(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0, 127.0.0.1, 198.84.193.157, 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_multiple_left_most_ip(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_multiple_right_most_ip(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request, right_most_proxy=True)
        self.assertEqual(ip, "177.139.233.139")

    def test_http_x_forwarded_for_multiple_right_most_ip_private(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request, right_most_proxy=True)
        self.assertEqual(ip, "177.139.233.139")

    def test_http_x_forwarded_for_multiple_bad_address(self):
        # A leading non-address token ('unknown') must be skipped.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': 'unknown, 192.168.255.182, 10.0.0.0, 127.0.0.1, 198.84.193.157, 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_singleton(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.139")

    def test_http_x_forwarded_for_singleton_private_address(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '192.168.255.182',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.132")

    def test_bad_http_x_forwarded_for_fallback_on_x_real_ip(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': 'unknown 177.139.233.139',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.132")

    def test_empty_http_x_forwarded_for_fallback_on_x_real_ip(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '',
            'HTTP_X_REAL_IP': '177.139.233.132',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.132")

    def test_empty_http_x_forwarded_for_empty_x_real_ip_fallback_on_remote_addr(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '',
            'HTTP_X_REAL_IP': '',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_empty_http_x_forwarded_for_private_x_real_ip_fallback_on_remote_addr(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '',
            'HTTP_X_REAL_IP': '192.168.255.182',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_private_http_x_forward_for_ip_addr(self):
        # Only private/loopback candidates anywhere -> no "real" IP at all.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '127.0.0.1',
            'HTTP_X_REAL_IP': '',
            'REMOTE_ADDR': '',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, None)

    def test_private_remote_addr_for_ip_addr(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '',
            'REMOTE_ADDR': '127.0.0.1',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, None)

    def test_missing_x_forwarded(self):
        request = HttpRequest()
        request.META = {
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_missing_x_forwarded_missing_real_ip(self):
        request = HttpRequest()
        request.META = {
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_best_matched_real_ip(self):
        # get_ip() (unlike get_real_ip) may return a private/loopback
        # address, preferring the "best matched" candidate.
        request = HttpRequest()
        request.META = {
            'HTTP_X_REAL_IP': '127.0.0.1',
            'REMOTE_ADDR': '177.31.233.133',
        }
        ip = get_ip(request)
        self.assertEqual(ip, "177.31.233.133")

    def test_best_matched_private_ip(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_REAL_IP': '127.0.0.1',
            'REMOTE_ADDR': '192.31.233.133',
        }
        ip = get_ip(request)
        self.assertEqual(ip, "192.31.233.133")

    def test_best_matched_private_ip_2(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_REAL_IP': '192.31.233.133',
            'REMOTE_ADDR': '127.0.0.1',
        }
        ip = get_ip(request)
        self.assertEqual(ip, "192.31.233.133")

    # The same scenarios again for the non-HTTP_-prefixed header spelling.

    def test_x_forwarded_for_multiple(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0, 127.0.0.1, 198.84.193.157, 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_x_forwarded_for_multiple_left_most_ip(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_x_forwarded_for_multiple_right_most_ip(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request, right_most_proxy=True)
        self.assertEqual(ip, "177.139.233.139")

    def test_x_forwarded_for_multiple_right_most_ip_private(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '192.168.255.182, 198.84.193.157, 10.0.0.0, 127.0.0.1, 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request, right_most_proxy=True)
        self.assertEqual(ip, "177.139.233.139")

    def test_x_forwarded_for_multiple_bad_address(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': 'unknown, 192.168.255.182, 10.0.0.0, 127.0.0.1, 198.84.193.157, 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_x_forwarded_for_singleton(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.139")

    def test_x_forwarded_for_singleton_private_address(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '192.168.255.182',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_bad_x_forwarded_for_fallback_on_x_real_ip(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': 'unknown 177.139.233.139',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_empty_x_forwarded_for_fallback_on_x_real_ip(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_empty_x_forwarded_for_empty_x_real_ip_fallback_on_remote_addr(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_empty_x_forwarded_for_private_x_real_ip_fallback_on_remote_addr(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '',
            'REMOTE_ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.133")

    def test_private_x_forward_for_ip_addr(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '127.0.0.1',
            'REMOTE_ADDR': '',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, None)

    def test_x_forwarded_for_singleton_hyphen_as_delimiter(self):
        # Header spelling with hyphens instead of underscores.
        request = HttpRequest()
        request.META = {
            'X-FORWARDED-FOR': '177.139.233.139',
            'REMOTE-ADDR': '177.139.233.133',
        }
        ip = get_real_ip(request)
        self.assertEqual(ip, "177.139.233.139")
class IPv4TrustedProxiesTestCase(TestCase):
    """Trusted Proxies - IP address Test"""
    # get_trusted_ip() only yields an address when the request arrived via
    # one of the configured trusted proxies (exact IP or prefix match).

    def test_meta_none(self):
        request = HttpRequest()
        request.META = {
        }
        ip = get_trusted_ip(request)
        self.assertIsNone(ip)

    def test_http_x_forwarded_for_conf_settings(self):
        # Trusted proxies taken from the django settings configuration.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.100',
        }
        ip = get_trusted_ip(request)
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_no_proxy(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=[])
        self.assertIsNone(ip)

    def test_http_x_forwarded_for_single_proxy(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_single_proxy_with_right_most(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '177.139.233.139, 177.139.200.139, 198.84.193.157',
        }
        ip = get_trusted_ip(request, right_most_proxy=True, trusted_proxies=['177.139.233.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_multi_proxy(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233.138', '177.139.233.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_all_proxies_in_subnet(self):
        # Trusted proxies may be given as an address prefix (subnet).
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233'])
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_all_proxies_in_subnet_2(self):
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_x_forwarded_for_single_proxy(self):
        request = HttpRequest()
        request.META = {
            'X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_x_forwarded_for_single_proxy_hyphens(self):
        request = HttpRequest()
        request.META = {
            'X-FORWARDED-FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233.139'])
        self.assertEqual(ip, "198.84.193.157")

    def test_http_x_forwarded_for_and_x_forward_for_single_proxy(self):
        # The HTTP_-prefixed header takes precedence over the bare one.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_FOR': '198.84.193.156, 177.139.200.139, 177.139.233.139',
            'X_FORWARDED_FOR': '198.84.193.157, 177.139.200.139, 177.139.233.139',
        }
        ip = get_trusted_ip(request, trusted_proxies=['177.139.233.139'])
        self.assertEqual(ip, "198.84.193.156")
| apache-2.0 |
piyush0609/scipy | scipy/io/harwell_boeing/tests/test_hb.py | 126 | 2389 | from __future__ import division, print_function, absolute_import
import os
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
import tempfile
import numpy as np
from numpy.testing import TestCase, assert_equal, \
assert_array_almost_equal_nulp
from scipy.sparse import coo_matrix, csc_matrix, rand
from scipy.io import hb_read, hb_write
from scipy.io.harwell_boeing import HBFile, HBInfo
SIMPLE = """\
No Title |No Key
9 4 1 4
RUA 100 100 10 0
(26I3) (26I3) (3E23.15)
1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
37 71 89 18 30 45 70 19 25 52
2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
6.912334991524289e-01
"""
SIMPLE_MATRIX = coo_matrix(
(
(0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
0.0661749042483, 0.887037034319, 0.419647859016,
0.564960307211, 0.993442388709, 0.691233499152,),
(np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
[0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
def assert_csc_almost_equal(r, l):
    """Assert two sparse matrices have identical CSC structure and
    data equal to within 10000 ULPs."""
    expected = csc_matrix(r)
    actual = csc_matrix(l)
    # Structure (column pointers and row indices) must match exactly.
    for attr in ('indptr', 'indices'):
        assert_equal(getattr(expected, attr), getattr(actual, attr))
    # Values only need to agree to floating-point precision.
    assert_array_almost_equal_nulp(expected.data, actual.data, 10000)
class TestHBReader(TestCase):
    """Read the fixed Harwell-Boeing text fixture and compare against
    the known expected matrix."""

    def test_simple(self):
        # hb_read accepts a file-like object; SIMPLE is the raw HB text.
        m = hb_read(StringIO(SIMPLE))
        assert_csc_almost_equal(m, SIMPLE_MATRIX)
class TestRBRoundtrip(TestCase):
    """Write a random sparse matrix to a temporary file and verify it
    reads back unchanged."""

    def test_simple(self):
        # 5%-dense random matrix, converted to CSC storage.
        rm = rand(100, 1000, 0.05).tocsc()
        fd, filename = tempfile.mkstemp(suffix="rb")
        try:
            hb_write(filename, rm, HBInfo.from_data(rm))
            m = hb_read(filename)
        finally:
            # hb_write/hb_read open by name; close the mkstemp descriptor
            # and remove the file even if the roundtrip fails.
            os.close(fd)
            os.remove(filename)
        assert_csc_almost_equal(m, rm)
| bsd-3-clause |
andreparames/odoo | addons/pos_discount/__init__.py | 315 | 1072 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import discount
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vodik/pytest | _pytest/resultlog.py | 208 | 3536 | """ log machine-parseable test session result information in a plain
text file.
"""
import py
import os
def pytest_addoption(parser):
    """Register the --resultlog/--result-log option under the
    terminal-reporting option group."""
    reporting_group = parser.getgroup("terminal reporting", "resultlog plugin options")
    reporting_group.addoption(
        '--resultlog', '--result-log',
        action="store", metavar="path", default=None,
        help="path for machine-readable result log.")
def pytest_configure(config):
    """Open the result log file and register the ResultLog plugin.

    Does nothing when no --resultlog path was given, or on xdist slave
    nodes (which carry a ``slaveinput`` attribute) — only the master
    process may own the log file.
    """
    path = config.option.resultlog
    if not path or hasattr(config, 'slaveinput'):
        return
    log_dir = os.path.dirname(os.path.abspath(path))
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    logfile = open(path, 'w', 1)  # line buffered
    config._resultlog = ResultLog(config, logfile)
    config.pluginmanager.register(config._resultlog)
def pytest_unconfigure(config):
    """Close the result log and unregister the plugin, if one was installed."""
    plugin = getattr(config, '_resultlog', None)
    if not plugin:
        return
    plugin.logfile.close()
    del config._resultlog
    config.pluginmanager.unregister(plugin)
def generic_path(item):
    """Build a machine-readable path string for a collected item.

    Nodes in the same file are joined with '.' — except the first node
    after a file transition, which is joined with ':'.  File-level
    transitions are joined with '/'.  Names starting with '(' or '['
    (parametrized parts) replace the separator that precedes them.
    """
    chain = item.listchain()
    parts = [chain[0].name]
    current_fspath = chain[0].fspath
    pending_colon = False
    for node in chain[1:]:
        node_fspath = node.fspath
        if node_fspath == current_fspath:
            parts.append(':' if pending_colon else '.')
            pending_colon = False
        else:
            parts.append('/')
            pending_colon = True
        node_name = node.name
        if node_name[0] in '([':
            # parametrized part: the name attaches directly, so drop the
            # separator just added
            parts.pop()
        parts.append(node_name)
        current_fspath = node_fspath
    return ''.join(parts)
class ResultLog(object):
    """Plugin object that appends one status line (plus indented detail
    lines) per test/collection event to a plain-text log file."""

    def __init__(self, config, logfile):
        self.config = config
        self.logfile = logfile # preferably line buffered

    def write_log_entry(self, testpath, lettercode, longrepr):
        """Write '<code> <path>' followed by each line of *longrepr*
        indented by one space."""
        py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
        for line in longrepr.splitlines():
            py.builtin.print_(" %s" % line, file=self.logfile)

    def log_outcome(self, report, lettercode, longrepr):
        # Prefer the stable nodeid; fall back to the raw fspath for
        # reports that do not carry one.
        testpath = getattr(report, 'nodeid', None)
        if testpath is None:
            testpath = report.fspath
        self.write_log_entry(testpath, lettercode, longrepr)

    def pytest_runtest_logreport(self, report):
        """Log a test outcome; setup/teardown phases are only logged
        when they did not pass."""
        if report.when != "call" and report.passed:
            return
        # Ask pytest for the one-letter status code of this report.
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]
        if code == 'x':
            # expected failure: keep the full failure representation
            longrepr = str(report.longrepr)
        elif code == 'X':
            # unexpectedly passing ("xpass"): nothing to detail
            longrepr = ''
        elif report.passed:
            longrepr = ""
        elif report.failed:
            longrepr = str(report.longrepr)
        elif report.skipped:
            # longrepr is a (path, lineno, reason) tuple; log the reason
            longrepr = str(report.longrepr[2])
        self.log_outcome(report, code, longrepr)

    def pytest_collectreport(self, report):
        """Log collection failures (F) and collection skips (S);
        successful collection is not logged."""
        if not report.passed:
            if report.failed:
                code = "F"
                longrepr = str(report.longrepr)
            else:
                assert report.skipped
                code = "S"
                # longrepr is a (path, lineno, reason) tuple here
                longrepr = "%s:%d: %s" % report.longrepr
            self.log_outcome(report, code, longrepr)

    def pytest_internalerror(self, excrepr):
        """Log an internal pytest error with the '!' status code."""
        reprcrash = getattr(excrepr, 'reprcrash', None)
        path = getattr(reprcrash, "path", None)
        if path is None:
            # no crash location available; fall back to the working dir
            path = "cwd:%s" % py.path.local()
        self.write_log_entry(path, '!', str(excrepr))
| mit |
ahmed-mahran/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlwt3/ExcelMagic.py | 46 | 29668 | """ lots of Excel Magic Numbers """
# Boundaries BIFF8+
# Worksheet size limits for the BIFF8 file format (Excel 97-2003):
# rows are 0..65535, columns 0..255.
MAX_ROW = 65536
MAX_COL = 256
biff_records = {
0x0000: "DIMENSIONS",
0x0001: "BLANK",
0x0002: "INTEGER",
0x0003: "NUMBER",
0x0004: "LABEL",
0x0005: "BOOLERR",
0x0006: "FORMULA",
0x0007: "STRING",
0x0008: "ROW",
0x0009: "BOF",
0x000A: "EOF",
0x000B: "INDEX",
0x000C: "CALCCOUNT",
0x000D: "CALCMODE",
0x000E: "PRECISION",
0x000F: "REFMODE",
0x0010: "DELTA",
0x0011: "ITERATION",
0x0012: "PROTECT",
0x0013: "PASSWORD",
0x0014: "HEADER",
0x0015: "FOOTER",
0x0016: "EXTERNCOUNT",
0x0017: "EXTERNSHEET",
0x0018: "NAME",
0x0019: "WINDOWPROTECT",
0x001A: "VERTICALPAGEBREAKS",
0x001B: "HORIZONTALPAGEBREAKS",
0x001C: "NOTE",
0x001D: "SELECTION",
0x001E: "FORMAT",
0x001F: "FORMATCOUNT",
0x0020: "COLUMNDEFAULT",
0x0021: "ARRAY",
0x0022: "1904",
0x0023: "EXTERNNAME",
0x0024: "COLWIDTH",
0x0025: "DEFAULTROWHEIGHT",
0x0026: "LEFTMARGIN",
0x0027: "RIGHTMARGIN",
0x0028: "TOPMARGIN",
0x0029: "BOTTOMMARGIN",
0x002A: "PRINTHEADERS",
0x002B: "PRINTGRIDLINES",
0x002F: "FILEPASS",
0x0031: "FONT",
0x0036: "TABLE",
0x003C: "CONTINUE",
0x003D: "WINDOW1",
0x003E: "WINDOW2",
0x0040: "BACKUP",
0x0041: "PANE",
0x0042: "CODEPAGE",
0x0043: "XF",
0x0044: "IXFE",
0x0045: "EFONT",
0x004D: "PLS",
0x0050: "DCON",
0x0051: "DCONREF",
0x0053: "DCONNAME",
0x0055: "DEFCOLWIDTH",
0x0056: "BUILTINFMTCNT",
0x0059: "XCT",
0x005A: "CRN",
0x005B: "FILESHARING",
0x005C: "WRITEACCESS",
0x005D: "OBJ",
0x005E: "UNCALCED",
0x005F: "SAFERECALC",
0x0060: "TEMPLATE",
0x0063: "OBJPROTECT",
0x007D: "COLINFO",
0x007E: "RK",
0x007F: "IMDATA",
0x0080: "GUTS",
0x0081: "WSBOOL",
0x0082: "GRIDSET",
0x0083: "HCENTER",
0x0084: "VCENTER",
0x0085: "BOUNDSHEET",
0x0086: "WRITEPROT",
0x0087: "ADDIN",
0x0088: "EDG",
0x0089: "PUB",
0x008C: "COUNTRY",
0x008D: "HIDEOBJ",
0x008E: "BUNDLESOFFSET",
0x008F: "BUNDLEHEADER",
0x0090: "SORT",
0x0091: "SUB",
0x0092: "PALETTE",
0x0093: "STYLE",
0x0094: "LHRECORD",
0x0095: "LHNGRAPH",
0x0096: "SOUND",
0x0098: "LPR",
0x0099: "STANDARDWIDTH",
0x009A: "FNGROUPNAME",
0x009B: "FILTERMODE",
0x009C: "FNGROUPCOUNT",
0x009D: "AUTOFILTERINFO",
0x009E: "AUTOFILTER",
0x00A0: "SCL",
0x00A1: "SETUP",
0x00A9: "COORDLIST",
0x00AB: "GCW",
0x00AE: "SCENMAN",
0x00AF: "SCENARIO",
0x00B0: "SXVIEW",
0x00B1: "SXVD",
0x00B2: "SXVI",
0x00B4: "SXIVD",
0x00B5: "SXLI",
0x00B6: "SXPI",
0x00B8: "DOCROUTE",
0x00B9: "RECIPNAME",
0x00BC: "SHRFMLA",
0x00BD: "MULRK",
0x00BE: "MULBLANK",
0x00C1: "MMS",
0x00C2: "ADDMENU",
0x00C3: "DELMENU",
0x00C5: "SXDI",
0x00C6: "SXDB",
0x00C7: "SXFIELD",
0x00C8: "SXINDEXLIST",
0x00C9: "SXDOUBLE",
0x00CD: "SXSTRING",
0x00CE: "SXDATETIME",
0x00D0: "SXTBL",
0x00D1: "SXTBRGITEM",
0x00D2: "SXTBPG",
0x00D3: "OBPROJ",
0x00D5: "SXIDSTM",
0x00D6: "RSTRING",
0x00D7: "DBCELL",
0x00DA: "BOOKBOOL",
0x00DC: "SXEXT|PARAMQRY",
0x00DD: "SCENPROTECT",
0x00DE: "OLESIZE",
0x00DF: "UDDESC",
0x00E0: "XF",
0x00E1: "INTERFACEHDR",
0x00E2: "INTERFACEEND",
0x00E3: "SXVS",
0x00E5: "MERGEDCELLS",
0x00E9: "BITMAP",
0x00EB: "MSODRAWINGGROUP",
0x00EC: "MSODRAWING",
0x00ED: "MSODRAWINGSELECTION",
0x00F0: "SXRULE",
0x00F1: "SXEX",
0x00F2: "SXFILT",
0x00F6: "SXNAME",
0x00F7: "SXSELECT",
0x00F8: "SXPAIR",
0x00F9: "SXFMLA",
0x00FB: "SXFORMAT",
0x00FC: "SST",
0x00FD: "LABELSST",
0x00FF: "EXTSST",
0x0100: "SXVDEX",
0x0103: "SXFORMULA",
0x0122: "SXDBEX",
0x0137: "CHTRINSERT",
0x0138: "CHTRINFO",
0x013B: "CHTRCELLCONTENT",
0x013D: "TABID",
0x0140: "CHTRMOVERANGE",
0x014D: "CHTRINSERTTAB",
0x015F: "LABELRANGES",
0x0160: "USESELFS",
0x0161: "DSF",
0x0162: "XL5MODIFY",
0x0196: "CHTRHEADER",
0x01A9: "USERBVIEW",
0x01AA: "USERSVIEWBEGIN",
0x01AB: "USERSVIEWEND",
0x01AD: "QSI",
0x01AE: "SUPBOOK",
0x01AF: "PROT4REV",
0x01B0: "CONDFMT",
0x01B1: "CF",
0x01B2: "DVAL",
0x01B5: "DCONBIN",
0x01B6: "TXO",
0x01B7: "REFRESHALL",
0x01B8: "HLINK",
0x01BA: "CODENAME",
0x01BB: "SXFDBTYPE",
0x01BC: "PROT4REVPASS",
0x01BE: "DV",
0x01C0: "XL9FILE",
0x01C1: "RECALCID",
0x0200: "DIMENSIONS",
0x0201: "BLANK",
0x0203: "NUMBER",
0x0204: "LABEL",
0x0205: "BOOLERR",
0x0206: "FORMULA",
0x0207: "STRING",
0x0208: "ROW",
0x0209: "BOF",
0x020B: "INDEX",
0x0218: "NAME",
0x0221: "ARRAY",
0x0223: "EXTERNNAME",
0x0225: "DEFAULTROWHEIGHT",
0x0231: "FONT",
0x0236: "TABLE",
0x023E: "WINDOW2",
0x0243: "XF",
0x027E: "RK",
0x0293: "STYLE",
0x0406: "FORMULA",
0x0409: "BOF",
0x041E: "FORMAT",
0x0443: "XF",
0x04BC: "SHRFMLA",
0x0800: "SCREENTIP",
0x0803: "WEBQRYSETTINGS",
0x0804: "WEBQRYTABLES",
0x0809: "BOF",
0x0862: "SHEETLAYOUT",
0x0867: "SHEETPROTECTION",
0x1001: "UNITS",
0x1002: "ChartChart",
0x1003: "ChartSeries",
0x1006: "ChartDataformat",
0x1007: "ChartLineformat",
0x1009: "ChartMarkerformat",
0x100A: "ChartAreaformat",
0x100B: "ChartPieformat",
0x100C: "ChartAttachedlabel",
0x100D: "ChartSeriestext",
0x1014: "ChartChartformat",
0x1015: "ChartLegend",
0x1016: "ChartSerieslist",
0x1017: "ChartBar",
0x1018: "ChartLine",
0x1019: "ChartPie",
0x101A: "ChartArea",
0x101B: "ChartScatter",
0x101C: "ChartChartline",
0x101D: "ChartAxis",
0x101E: "ChartTick",
0x101F: "ChartValuerange",
0x1020: "ChartCatserrange",
0x1021: "ChartAxislineformat",
0x1022: "ChartFormatlink",
0x1024: "ChartDefaulttext",
0x1025: "ChartText",
0x1026: "ChartFontx",
0x1027: "ChartObjectLink",
0x1032: "ChartFrame",
0x1033: "BEGIN",
0x1034: "END",
0x1035: "ChartPlotarea",
0x103A: "Chart3D",
0x103C: "ChartPicf",
0x103D: "ChartDropbar",
0x103E: "ChartRadar",
0x103F: "ChartSurface",
0x1040: "ChartRadararea",
0x1041: "ChartAxisparent",
0x1043: "ChartLegendxn",
0x1044: "ChartShtprops",
0x1045: "ChartSertocrt",
0x1046: "ChartAxesused",
0x1048: "ChartSbaseref",
0x104A: "ChartSerparent",
0x104B: "ChartSerauxtrend",
0x104E: "ChartIfmt",
0x104F: "ChartPos",
0x1050: "ChartAlruns",
0x1051: "ChartAI",
0x105B: "ChartSerauxerrbar",
0x105D: "ChartSerfmt",
0x105F: "Chart3DDataFormat",
0x1060: "ChartFbi",
0x1061: "ChartBoppop",
0x1062: "ChartAxcext",
0x1063: "ChartDat",
0x1064: "ChartPlotgrowth",
0x1065: "ChartSiindex",
0x1066: "ChartGelframe",
0x1067: "ChartBoppcustom",
0xFFFF: ""
}
all_funcs_by_name = {
# Includes Analysis ToolPak aka ATP aka add-in aka xcall functions,
# distinguished by -ve opcode.
# name: (opcode, min # args, max # args, func return type, func arg types)
# + in func arg types means more of the same.
'ABS' : ( 24, 1, 1, 'V', 'V'),
'ACCRINT' : ( -1, 6, 7, 'V', 'VVVVVVV'),
'ACCRINTM' : ( -1, 3, 5, 'V', 'VVVVV'),
'ACOS' : ( 99, 1, 1, 'V', 'V'),
'ACOSH' : (233, 1, 1, 'V', 'V'),
'ADDRESS' : (219, 2, 5, 'V', 'VVVVV'),
'AMORDEGRC' : ( -1, 7, 7, 'V', 'VVVVVVV'),
'AMORLINC' : ( -1, 7, 7, 'V', 'VVVVVVV'),
'AND' : ( 36, 1, 30, 'V', 'D+'),
'AREAS' : ( 75, 1, 1, 'V', 'R'),
'ASC' : (214, 1, 1, 'V', 'V'),
'ASIN' : ( 98, 1, 1, 'V', 'V'),
'ASINH' : (232, 1, 1, 'V', 'V'),
'ATAN' : ( 18, 1, 1, 'V', 'V'),
'ATAN2' : ( 97, 2, 2, 'V', 'VV'),
'ATANH' : (234, 1, 1, 'V', 'V'),
'AVEDEV' : (269, 1, 30, 'V', 'D+'),
'AVERAGE' : ( 5, 1, 30, 'V', 'D+'),
'AVERAGEA' : (361, 1, 30, 'V', 'D+'),
'BAHTTEXT' : (368, 1, 1, 'V', 'V'),
'BESSELI' : ( -1, 2, 2, 'V', 'VV'),
'BESSELJ' : ( -1, 2, 2, 'V', 'VV'),
'BESSELK' : ( -1, 2, 2, 'V', 'VV'),
'BESSELY' : ( -1, 2, 2, 'V', 'VV'),
'BETADIST' : (270, 3, 5, 'V', 'VVVVV'),
'BETAINV' : (272, 3, 5, 'V', 'VVVVV'),
'BIN2DEC' : ( -1, 1, 1, 'V', 'V'),
'BIN2HEX' : ( -1, 1, 2, 'V', 'VV'),
'BIN2OCT' : ( -1, 1, 2, 'V', 'VV'),
'BINOMDIST' : (273, 4, 4, 'V', 'VVVV'),
'CEILING' : (288, 2, 2, 'V', 'VV'),
'CELL' : (125, 1, 2, 'V', 'VR'),
'CHAR' : (111, 1, 1, 'V', 'V'),
'CHIDIST' : (274, 2, 2, 'V', 'VV'),
'CHIINV' : (275, 2, 2, 'V', 'VV'),
'CHITEST' : (306, 2, 2, 'V', 'AA'),
'CHOOSE' : (100, 2, 30, 'R', 'VR+'),
'CLEAN' : (162, 1, 1, 'V', 'V'),
'CODE' : (121, 1, 1, 'V', 'V'),
'COLUMN' : ( 9, 0, 1, 'V', 'R'),
'COLUMNS' : ( 77, 1, 1, 'V', 'R'),
'COMBIN' : (276, 2, 2, 'V', 'VV'),
'COMPLEX' : ( -1, 2, 3, 'V', 'VVV'),
'CONCATENATE' : (336, 1, 30, 'V', 'V+'),
'CONFIDENCE' : (277, 3, 3, 'V', 'VVV'),
'CONVERT' : ( -1, 3, 3, 'V', 'VVV'),
'CORREL' : (307, 2, 2, 'V', 'AA'),
'COS' : ( 16, 1, 1, 'V', 'V'),
'COSH' : (230, 1, 1, 'V', 'V'),
'COUNT' : ( 0, 1, 30, 'V', 'D+'),
'COUNTA' : (169, 1, 30, 'V', 'D+'),
'COUNTBLANK' : (347, 1, 1, 'V', 'R'),
'COUNTIF' : (346, 2, 2, 'V', 'RV'),
'COUPDAYBS' : ( -1, 3, 5, 'V', 'VVVVV'),
'COUPDAYS' : ( -1, 3, 5, 'V', 'VVVVV'),
'COUPDAYSNC' : ( -1, 3, 5, 'V', 'VVVVV'),
'COUPNCD' : ( -1, 3, 5, 'V', 'VVVVV'),
'COUPNUM' : ( -1, 3, 5, 'V', 'VVVVV'),
'COUPPCD' : ( -1, 3, 5, 'V', 'VVVVV'),
'COVAR' : (308, 2, 2, 'V', 'AA'),
'CRITBINOM' : (278, 3, 3, 'V', 'VVV'),
'CUMIPMT' : ( -1, 6, 6, 'V', 'VVVVVV'),
'CUMPRINC' : ( -1, 6, 6, 'V', 'VVVVVV'),
'DATE' : ( 65, 3, 3, 'V', 'VVV'),
'DATEDIF' : (351, 3, 3, 'V', 'VVV'),
'DATEVALUE' : (140, 1, 1, 'V', 'V'),
'DAVERAGE' : ( 42, 3, 3, 'V', 'RRR'),
'DAY' : ( 67, 1, 1, 'V', 'V'),
'DAYS360' : (220, 2, 3, 'V', 'VVV'),
'DB' : (247, 4, 5, 'V', 'VVVVV'),
'DBCS' : (215, 1, 1, 'V', 'V'),
'DCOUNT' : ( 40, 3, 3, 'V', 'RRR'),
'DCOUNTA' : (199, 3, 3, 'V', 'RRR'),
'DDB' : (144, 4, 5, 'V', 'VVVVV'),
'DEC2BIN' : ( -1, 1, 2, 'V', 'VV'),
'DEC2HEX' : ( -1, 1, 2, 'V', 'VV'),
'DEC2OCT' : ( -1, 1, 2, 'V', 'VV'),
'DEGREES' : (343, 1, 1, 'V', 'V'),
'DELTA' : ( -1, 1, 2, 'V', 'VV'),
'DEVSQ' : (318, 1, 30, 'V', 'D+'),
'DGET' : (235, 3, 3, 'V', 'RRR'),
'DISC' : ( -1, 4, 5, 'V', 'VVVVV'),
'DMAX' : ( 44, 3, 3, 'V', 'RRR'),
'DMIN' : ( 43, 3, 3, 'V', 'RRR'),
'DOLLAR' : ( 13, 1, 2, 'V', 'VV'),
'DOLLARDE' : ( -1, 2, 2, 'V', 'VV'),
'DOLLARFR' : ( -1, 2, 2, 'V', 'VV'),
'DPRODUCT' : (189, 3, 3, 'V', 'RRR'),
'DSTDEV' : ( 45, 3, 3, 'V', 'RRR'),
'DSTDEVP' : (195, 3, 3, 'V', 'RRR'),
'DSUM' : ( 41, 3, 3, 'V', 'RRR'),
'DURATION' : ( -1, 5, 6, 'V', 'VVVVVV'),
'DVAR' : ( 47, 3, 3, 'V', 'RRR'),
'DVARP' : (196, 3, 3, 'V', 'RRR'),
'EDATE' : ( -1, 2, 2, 'V', 'VV'),
'EFFECT' : ( -1, 2, 2, 'V', 'VV'),
'EOMONTH' : ( -1, 1, 2, 'V', 'VV'),
'ERF' : ( -1, 1, 2, 'V', 'VV'),
'ERFC' : ( -1, 1, 1, 'V', 'V'),
'ERROR.TYPE' : (261, 1, 1, 'V', 'V'),
'EVEN' : (279, 1, 1, 'V', 'V'),
'EXACT' : (117, 2, 2, 'V', 'VV'),
'EXP' : ( 21, 1, 1, 'V', 'V'),
'EXPONDIST' : (280, 3, 3, 'V', 'VVV'),
'FACT' : (184, 1, 1, 'V', 'V'),
'FACTDOUBLE' : ( -1, 1, 1, 'V', 'V'),
'FALSE' : ( 35, 0, 0, 'V', '-'),
'FDIST' : (281, 3, 3, 'V', 'VVV'),
'FIND' : (124, 2, 3, 'V', 'VVV'),
'FINDB' : (205, 2, 3, 'V', 'VVV'),
'FINV' : (282, 3, 3, 'V', 'VVV'),
'FISHER' : (283, 1, 1, 'V', 'V'),
'FISHERINV' : (284, 1, 1, 'V', 'V'),
'FIXED' : ( 14, 2, 3, 'V', 'VVV'),
'FLOOR' : (285, 2, 2, 'V', 'VV'),
'FORECAST' : (309, 3, 3, 'V', 'VAA'),
'FREQUENCY' : (252, 2, 2, 'A', 'RR'),
'FTEST' : (310, 2, 2, 'V', 'AA'),
'FV' : ( 57, 3, 5, 'V', 'VVVVV'),
'FVSCHEDULE' : ( -1, 2, 2, 'V', 'VA'),
'GAMMADIST' : (286, 4, 4, 'V', 'VVVV'),
'GAMMAINV' : (287, 3, 3, 'V', 'VVV'),
'GAMMALN' : (271, 1, 1, 'V', 'V'),
'GCD' : ( -1, 1, 29, 'V', 'V+'),
'GEOMEAN' : (319, 1, 30, 'V', 'D+'),
'GESTEP' : ( -1, 1, 2, 'V', 'VV'),
'GETPIVOTDATA': (358, 2, 30, 'A', 'VAV+'),
'GROWTH' : ( 52, 1, 4, 'A', 'RRRV'),
'HARMEAN' : (320, 1, 30, 'V', 'D+'),
'HEX2BIN' : ( -1, 1, 2, 'V', 'VV'),
'HEX2DEC' : ( -1, 1, 1, 'V', 'V'),
'HEX2OCT' : ( -1, 1, 2, 'V', 'VV'),
'HLOOKUP' : (101, 3, 4, 'V', 'VRRV'),
'HOUR' : ( 71, 1, 1, 'V', 'V'),
'HYPERLINK' : (359, 1, 2, 'V', 'VV'),
'HYPGEOMDIST' : (289, 4, 4, 'V', 'VVVV'),
'IF' : ( 1, 2, 3, 'R', 'VRR'),
'IMABS' : ( -1, 1, 1, 'V', 'V'),
'IMAGINARY' : ( -1, 1, 1, 'V', 'V'),
'IMARGUMENT' : ( -1, 1, 1, 'V', 'V'),
'IMCONJUGATE' : ( -1, 1, 1, 'V', 'V'),
'IMCOS' : ( -1, 1, 1, 'V', 'V'),
'IMDIV' : ( -1, 2, 2, 'V', 'VV'),
'IMEXP' : ( -1, 1, 1, 'V', 'V'),
'IMLN' : ( -1, 1, 1, 'V', 'V'),
'IMLOG10' : ( -1, 1, 1, 'V', 'V'),
'IMLOG2' : ( -1, 1, 1, 'V', 'V'),
'IMPOWER' : ( -1, 2, 2, 'V', 'VV'),
'IMPRODUCT' : ( -1, 2, 2, 'V', 'VV'),
'IMREAL' : ( -1, 1, 1, 'V', 'V'),
'IMSIN' : ( -1, 1, 1, 'V', 'V'),
'IMSQRT' : ( -1, 1, 1, 'V', 'V'),
'IMSUB' : ( -1, 2, 2, 'V', 'VV'),
'IMSUM' : ( -1, 1, 29, 'V', 'V+'),
'INDEX' : ( 29, 2, 4, 'R', 'RVVV'),
'INDIRECT' : (148, 1, 2, 'R', 'VV'),
'INFO' : (244, 1, 1, 'V', 'V'),
'INT' : ( 25, 1, 1, 'V', 'V'),
'INTERCEPT' : (311, 2, 2, 'V', 'AA'),
'INTRATE' : ( -1, 4, 5, 'V', 'VVVVV'),
'IPMT' : (167, 4, 6, 'V', 'VVVVVV'),
'IRR' : ( 62, 1, 2, 'V', 'RV'),
'ISBLANK' : (129, 1, 1, 'V', 'V'),
'ISERR' : (126, 1, 1, 'V', 'V'),
'ISERROR' : ( 3, 1, 1, 'V', 'V'),
'ISEVEN' : ( -1, 1, 1, 'V', 'V'),
'ISLOGICAL' : (198, 1, 1, 'V', 'V'),
'ISNA' : ( 2, 1, 1, 'V', 'V'),
'ISNONTEXT' : (190, 1, 1, 'V', 'V'),
'ISNUMBER' : (128, 1, 1, 'V', 'V'),
'ISODD' : ( -1, 1, 1, 'V', 'V'),
'ISPMT' : (350, 4, 4, 'V', 'VVVV'),
'ISREF' : (105, 1, 1, 'V', 'R'),
'ISTEXT' : (127, 1, 1, 'V', 'V'),
'KURT' : (322, 1, 30, 'V', 'D+'),
'LARGE' : (325, 2, 2, 'V', 'RV'),
'LCM' : ( -1, 1, 29, 'V', 'V+'),
'LEFT' : (115, 1, 2, 'V', 'VV'),
'LEFTB' : (208, 1, 2, 'V', 'VV'),
'LEN' : ( 32, 1, 1, 'V', 'V'),
'LENB' : (211, 1, 1, 'V', 'V'),
'LINEST' : ( 49, 1, 4, 'A', 'RRVV'),
'LN' : ( 22, 1, 1, 'V', 'V'),
'LOG' : (109, 1, 2, 'V', 'VV'),
'LOG10' : ( 23, 1, 1, 'V', 'V'),
'LOGEST' : ( 51, 1, 4, 'A', 'RRVV'),
'LOGINV' : (291, 3, 3, 'V', 'VVV'),
'LOGNORMDIST' : (290, 3, 3, 'V', 'VVV'),
'LOOKUP' : ( 28, 2, 3, 'V', 'VRR'),
'LOWER' : (112, 1, 1, 'V', 'V'),
'MATCH' : ( 64, 2, 3, 'V', 'VRR'),
'MAX' : ( 7, 1, 30, 'V', 'D+'),
'MAXA' : (362, 1, 30, 'V', 'D+'),
'MDETERM' : (163, 1, 1, 'V', 'A'),
'MDURATION' : ( -1, 5, 6, 'V', 'VVVVVV'),
'MEDIAN' : (227, 1, 30, 'V', 'D+'),
'MID' : ( 31, 3, 3, 'V', 'VVV'),
'MIDB' : (210, 3, 3, 'V', 'VVV'),
'MIN' : ( 6, 1, 30, 'V', 'D+'),
'MINA' : (363, 1, 30, 'V', 'D+'),
'MINUTE' : ( 72, 1, 1, 'V', 'V'),
'MINVERSE' : (164, 1, 1, 'A', 'A'),
'MIRR' : ( 61, 3, 3, 'V', 'RVV'),
'MMULT' : (165, 2, 2, 'A', 'AA'),
'MOD' : ( 39, 2, 2, 'V', 'VV'),
'MODE' : (330, 1, 30, 'V', 'A+'), ################ weird #################
'MONTH' : ( 68, 1, 1, 'V', 'V'),
'MROUND' : ( -1, 2, 2, 'V', 'VV'),
'MULTINOMIAL' : ( -1, 1, 29, 'V', 'V+'),
'N' : (131, 1, 1, 'V', 'R'),
'NA' : ( 10, 0, 0, 'V', '-'),
'NEGBINOMDIST': (292, 3, 3, 'V', 'VVV'),
'NETWORKDAYS' : ( -1, 2, 3, 'V', 'VVR'),
'NOMINAL' : ( -1, 2, 2, 'V', 'VV'),
'NORMDIST' : (293, 4, 4, 'V', 'VVVV'),
'NORMINV' : (295, 3, 3, 'V', 'VVV'),
'NORMSDIST' : (294, 1, 1, 'V', 'V'),
'NORMSINV' : (296, 1, 1, 'V', 'V'),
'NOT' : ( 38, 1, 1, 'V', 'V'),
'NOW' : ( 74, 0, 0, 'V', '-'),
'NPER' : ( 58, 3, 5, 'V', 'VVVVV'),
'NPV' : ( 11, 2, 30, 'V', 'VD+'),
'OCT2BIN' : ( -1, 1, 2, 'V', 'VV'),
'OCT2DEC' : ( -1, 1, 1, 'V', 'V'),
'OCT2HEX' : ( -1, 1, 2, 'V', 'VV'),
'ODD' : (298, 1, 1, 'V', 'V'),
'ODDFPRICE' : ( -1, 9, 9, 'V', 'VVVVVVVVV'),
'ODDFYIELD' : ( -1, 9, 9, 'V', 'VVVVVVVVV'),
'ODDLPRICE' : ( -1, 8, 8, 'V', 'VVVVVVVV'),
'ODDLYIELD' : ( -1, 8, 8, 'V', 'VVVVVVVV'),
'OFFSET' : ( 78, 3, 5, 'R', 'RVVVV'),
'OR' : ( 37, 1, 30, 'V', 'D+'),
'PEARSON' : (312, 2, 2, 'V', 'AA'),
'PERCENTILE' : (328, 2, 2, 'V', 'RV'),
'PERCENTRANK' : (329, 2, 3, 'V', 'RVV'),
'PERMUT' : (299, 2, 2, 'V', 'VV'),
'PHONETIC' : (360, 1, 1, 'V', 'R'),
'PI' : ( 19, 0, 0, 'V', '-'),
'PMT' : ( 59, 3, 5, 'V', 'VVVVV'),
'POISSON' : (300, 3, 3, 'V', 'VVV'),
'POWER' : (337, 2, 2, 'V', 'VV'),
'PPMT' : (168, 4, 6, 'V', 'VVVVVV'),
'PRICE' : ( -1, 6, 7, 'V', 'VVVVVVV'),
'PRICEDISC' : ( -1, 4, 5, 'V', 'VVVVV'),
'PRICEMAT' : ( -1, 5, 6, 'V', 'VVVVVV'),
'PROB' : (317, 3, 4, 'V', 'AAVV'),
'PRODUCT' : (183, 1, 30, 'V', 'D+'),
'PROPER' : (114, 1, 1, 'V', 'V'),
'PV' : ( 56, 3, 5, 'V', 'VVVVV'),
'QUARTILE' : (327, 2, 2, 'V', 'RV'),
'QUOTIENT' : ( -1, 2, 2, 'V', 'VV'),
'RADIANS' : (342, 1, 1, 'V', 'V'),
'RAND' : ( 63, 0, 0, 'V', '-'),
'RANDBETWEEN' : ( -1, 2, 2, 'V', 'VV'),
'RANK' : (216, 2, 3, 'V', 'VRV'),
'RATE' : ( 60, 3, 6, 'V', 'VVVVVV'),
'RECEIVED' : ( -1, 4, 5, 'V', 'VVVVV'),
'REPLACE' : (119, 4, 4, 'V', 'VVVV'),
'REPLACEB' : (207, 4, 4, 'V', 'VVVV'),
'REPT' : ( 30, 2, 2, 'V', 'VV'),
'RIGHT' : (116, 1, 2, 'V', 'VV'),
'RIGHTB' : (209, 1, 2, 'V', 'VV'),
'ROMAN' : (354, 1, 2, 'V', 'VV'),
'ROUND' : ( 27, 2, 2, 'V', 'VV'),
'ROUNDDOWN' : (213, 2, 2, 'V', 'VV'),
'ROUNDUP' : (212, 2, 2, 'V', 'VV'),
'ROW' : ( 8, 0, 1, 'V', 'R'),
'ROWS' : ( 76, 1, 1, 'V', 'R'),
'RSQ' : (313, 2, 2, 'V', 'AA'),
'RTD' : (379, 3, 30, 'A', 'VVV+'),
'SEARCH' : ( 82, 2, 3, 'V', 'VVV'),
'SEARCHB' : (206, 2, 3, 'V', 'VVV'),
'SECOND' : ( 73, 1, 1, 'V', 'V'),
'SERIESSUM' : ( -1, 4, 4, 'V', 'VVVA'),
'SIGN' : ( 26, 1, 1, 'V', 'V'),
'SIN' : ( 15, 1, 1, 'V', 'V'),
'SINH' : (229, 1, 1, 'V', 'V'),
'SKEW' : (323, 1, 30, 'V', 'D+'),
'SLN' : (142, 3, 3, 'V', 'VVV'),
'SLOPE' : (315, 2, 2, 'V', 'AA'),
'SMALL' : (326, 2, 2, 'V', 'RV'),
'SQRT' : ( 20, 1, 1, 'V', 'V'),
'SQRTPI' : ( -1, 1, 1, 'V', 'V'),
'STANDARDIZE' : (297, 3, 3, 'V', 'VVV'),
'STDEV' : ( 12, 1, 30, 'V', 'D+'),
'STDEVA' : (366, 1, 30, 'V', 'D+'),
'STDEVP' : (193, 1, 30, 'V', 'D+'),
'STDEVPA' : (364, 1, 30, 'V', 'D+'),
'STEYX' : (314, 2, 2, 'V', 'AA'),
'SUBSTITUTE' : (120, 3, 4, 'V', 'VVVV'),
'SUBTOTAL' : (344, 2, 30, 'V', 'VR+'),
'SUM' : ( 4, 1, 30, 'V', 'D+'),
'SUMIF' : (345, 2, 3, 'V', 'RVR'),
'SUMPRODUCT' : (228, 1, 30, 'V', 'A+'),
'SUMSQ' : (321, 1, 30, 'V', 'D+'),
'SUMX2MY2' : (304, 2, 2, 'V', 'AA'),
'SUMX2PY2' : (305, 2, 2, 'V', 'AA'),
'SUMXMY2' : (303, 2, 2, 'V', 'AA'),
'SYD' : (143, 4, 4, 'V', 'VVVV'),
'T' : (130, 1, 1, 'V', 'R'),
'TAN' : ( 17, 1, 1, 'V', 'V'),
'TANH' : (231, 1, 1, 'V', 'V'),
'TBILLEQ' : ( -1, 3, 3, 'V', 'VVV'),
'TBILLPRICE' : ( -1, 3, 3, 'V', 'VVV'),
'TBILLYIELD' : ( -1, 3, 3, 'V', 'VVV'),
'TDIST' : (301, 3, 3, 'V', 'VVV'),
'TEXT' : ( 48, 2, 2, 'V', 'VV'),
'TIME' : ( 66, 3, 3, 'V', 'VVV'),
'TIMEVALUE' : (141, 1, 1, 'V', 'V'),
'TINV' : (332, 2, 2, 'V', 'VV'),
'TODAY' : (221, 0, 0, 'V', '-'),
'TRANSPOSE' : ( 83, 1, 1, 'A', 'A'),
'TREND' : ( 50, 1, 4, 'A', 'RRRV'),
'TRIM' : (118, 1, 1, 'V', 'V'),
'TRIMMEAN' : (331, 2, 2, 'V', 'RV'),
'TRUE' : ( 34, 0, 0, 'V', '-'),
'TRUNC' : (197, 1, 2, 'V', 'VV'),
'TTEST' : (316, 4, 4, 'V', 'AAVV'),
'TYPE' : ( 86, 1, 1, 'V', 'V'),
'UPPER' : (113, 1, 1, 'V', 'V'),
'USDOLLAR' : (204, 1, 2, 'V', 'VV'),
'VALUE' : ( 33, 1, 1, 'V', 'V'),
'VAR' : ( 46, 1, 30, 'V', 'D+'),
'VARA' : (367, 1, 30, 'V', 'D+'),
'VARP' : (194, 1, 30, 'V', 'D+'),
'VARPA' : (365, 1, 30, 'V', 'D+'),
'VDB' : (222, 5, 7, 'V', 'VVVVVVV'),
'VLOOKUP' : (102, 3, 4, 'V', 'VRRV'),
'WEEKDAY' : ( 70, 1, 2, 'V', 'VV'),
'WEEKNUM' : ( -1, 1, 2, 'V', 'VV'),
'WEIBULL' : (302, 4, 4, 'V', 'VVVV'),
'WORKDAY' : ( -1, 2, 3, 'V', 'VVR'),
'XIRR' : ( -1, 2, 3, 'V', 'AAV'),
'XNPV' : ( -1, 3, 3, 'V', 'VAA'),
'YEAR' : ( 69, 1, 1, 'V', 'V'),
'YEARFRAC' : ( -1, 2, 3, 'V', 'VVV'),
'YIELD' : ( -1, 6, 7, 'V', 'VVVVVVV'),
'YIELDDISC' : ( -1, 4, 5, 'V', 'VVVVV'),
'YIELDMAT' : ( -1, 5, 6, 'V', 'VVVVVV'),
'ZTEST' : (324, 2, 3, 'V', 'RVV'),
}
# Formulas Parse things
ptgExp = 0x01
ptgTbl = 0x02
ptgAdd = 0x03
ptgSub = 0x04
ptgMul = 0x05
ptgDiv = 0x06
ptgPower = 0x07
ptgConcat = 0x08
ptgLT = 0x09
ptgLE = 0x0a
ptgEQ = 0x0b
ptgGE = 0x0c
ptgGT = 0x0d
ptgNE = 0x0e
ptgIsect = 0x0f
ptgUnion = 0x10
ptgRange = 0x11
ptgUplus = 0x12
ptgUminus = 0x13
ptgPercent = 0x14
ptgParen = 0x15
ptgMissArg = 0x16
ptgStr = 0x17
ptgExtend = 0x18
ptgAttr = 0x19
ptgSheet = 0x1a
ptgEndSheet = 0x1b
ptgErr = 0x1c
ptgBool = 0x1d
ptgInt = 0x1e
ptgNum = 0x1f
ptgArrayR = 0x20
ptgFuncR = 0x21
ptgFuncVarR = 0x22
ptgNameR = 0x23
ptgRefR = 0x24
ptgAreaR = 0x25
ptgMemAreaR = 0x26
ptgMemErrR = 0x27
ptgMemNoMemR = 0x28
ptgMemFuncR = 0x29
ptgRefErrR = 0x2a
ptgAreaErrR = 0x2b
ptgRefNR = 0x2c
ptgAreaNR = 0x2d
ptgMemAreaNR = 0x2e
ptgMemNoMemNR = 0x2f
ptgNameXR = 0x39
ptgRef3dR = 0x3a
ptgArea3dR = 0x3b
ptgRefErr3dR = 0x3c
ptgAreaErr3dR = 0x3d
ptgArrayV = 0x40
ptgFuncV = 0x41
ptgFuncVarV = 0x42
ptgNameV = 0x43
ptgRefV = 0x44
ptgAreaV = 0x45
ptgMemAreaV = 0x46
ptgMemErrV = 0x47
ptgMemNoMemV = 0x48
ptgMemFuncV = 0x49
ptgRefErrV = 0x4a
ptgAreaErrV = 0x4b
ptgRefNV = 0x4c
ptgAreaNV = 0x4d
ptgMemAreaNV = 0x4e
ptgMemNoMemNV = 0x4f
ptgFuncCEV = 0x58
ptgNameXV = 0x59
ptgRef3dV = 0x5a
ptgArea3dV = 0x5b
ptgRefErr3dV = 0x5c
ptgAreaErr3dV = 0x5d
ptgArrayA = 0x60
ptgFuncA = 0x61
ptgFuncVarA = 0x62
ptgNameA = 0x63
ptgRefA = 0x64
ptgAreaA = 0x65
ptgMemAreaA = 0x66
ptgMemErrA = 0x67
ptgMemNoMemA = 0x68
ptgMemFuncA = 0x69
ptgRefErrA = 0x6a
ptgAreaErrA = 0x6b
ptgRefNA = 0x6c
ptgAreaNA = 0x6d
ptgMemAreaNA = 0x6e
ptgMemNoMemNA = 0x6f
ptgFuncCEA = 0x78
ptgNameXA = 0x79
ptgRef3dA = 0x7a
ptgArea3dA = 0x7b
ptgRefErr3dA = 0x7c
ptgAreaErr3dA = 0x7d
PtgNames = {
ptgExp : "ptgExp",
ptgTbl : "ptgTbl",
ptgAdd : "ptgAdd",
ptgSub : "ptgSub",
ptgMul : "ptgMul",
ptgDiv : "ptgDiv",
ptgPower : "ptgPower",
ptgConcat : "ptgConcat",
ptgLT : "ptgLT",
ptgLE : "ptgLE",
ptgEQ : "ptgEQ",
ptgGE : "ptgGE",
ptgGT : "ptgGT",
ptgNE : "ptgNE",
ptgIsect : "ptgIsect",
ptgUnion : "ptgUnion",
ptgRange : "ptgRange",
ptgUplus : "ptgUplus",
ptgUminus : "ptgUminus",
ptgPercent : "ptgPercent",
ptgParen : "ptgParen",
ptgMissArg : "ptgMissArg",
ptgStr : "ptgStr",
ptgExtend : "ptgExtend",
ptgAttr : "ptgAttr",
ptgSheet : "ptgSheet",
ptgEndSheet : "ptgEndSheet",
ptgErr : "ptgErr",
ptgBool : "ptgBool",
ptgInt : "ptgInt",
ptgNum : "ptgNum",
ptgArrayR : "ptgArrayR",
ptgFuncR : "ptgFuncR",
ptgFuncVarR : "ptgFuncVarR",
ptgNameR : "ptgNameR",
ptgRefR : "ptgRefR",
ptgAreaR : "ptgAreaR",
ptgMemAreaR : "ptgMemAreaR",
ptgMemErrR : "ptgMemErrR",
ptgMemNoMemR : "ptgMemNoMemR",
ptgMemFuncR : "ptgMemFuncR",
ptgRefErrR : "ptgRefErrR",
ptgAreaErrR : "ptgAreaErrR",
ptgRefNR : "ptgRefNR",
ptgAreaNR : "ptgAreaNR",
ptgMemAreaNR : "ptgMemAreaNR",
ptgMemNoMemNR : "ptgMemNoMemNR",
ptgNameXR : "ptgNameXR",
ptgRef3dR : "ptgRef3dR",
ptgArea3dR : "ptgArea3dR",
ptgRefErr3dR : "ptgRefErr3dR",
ptgAreaErr3dR : "ptgAreaErr3dR",
ptgArrayV : "ptgArrayV",
ptgFuncV : "ptgFuncV",
ptgFuncVarV : "ptgFuncVarV",
ptgNameV : "ptgNameV",
ptgRefV : "ptgRefV",
ptgAreaV : "ptgAreaV",
ptgMemAreaV : "ptgMemAreaV",
ptgMemErrV : "ptgMemErrV",
ptgMemNoMemV : "ptgMemNoMemV",
ptgMemFuncV : "ptgMemFuncV",
ptgRefErrV : "ptgRefErrV",
ptgAreaErrV : "ptgAreaErrV",
ptgRefNV : "ptgRefNV",
ptgAreaNV : "ptgAreaNV",
ptgMemAreaNV : "ptgMemAreaNV",
ptgMemNoMemNV : "ptgMemNoMemNV",
ptgFuncCEV : "ptgFuncCEV",
ptgNameXV : "ptgNameXV",
ptgRef3dV : "ptgRef3dV",
ptgArea3dV : "ptgArea3dV",
ptgRefErr3dV : "ptgRefErr3dV",
ptgAreaErr3dV : "ptgAreaErr3dV",
ptgArrayA : "ptgArrayA",
ptgFuncA : "ptgFuncA",
ptgFuncVarA : "ptgFuncVarA",
ptgNameA : "ptgNameA",
ptgRefA : "ptgRefA",
ptgAreaA : "ptgAreaA",
ptgMemAreaA : "ptgMemAreaA",
ptgMemErrA : "ptgMemErrA",
ptgMemNoMemA : "ptgMemNoMemA",
ptgMemFuncA : "ptgMemFuncA",
ptgRefErrA : "ptgRefErrA",
ptgAreaErrA : "ptgAreaErrA",
ptgRefNA : "ptgRefNA",
ptgAreaNA : "ptgAreaNA",
ptgMemAreaNA : "ptgMemAreaNA",
ptgMemNoMemNA : "ptgMemNoMemNA",
ptgFuncCEA : "ptgFuncCEA",
ptgNameXA : "ptgNameXA",
ptgRef3dA : "ptgRef3dA",
ptgArea3dA : "ptgArea3dA",
ptgRefErr3dA : "ptgRefErr3dA",
ptgAreaErr3dA : "ptgAreaErr3dA"
}
# Maps BIFF cell error codes to the strings Excel displays for them.
error_msg_by_code = {
    0x00: "#NULL!", # intersection of two cell ranges is empty
    0x07: "#DIV/0!", # division by zero
    0x0F: "#VALUE!", # wrong type of operand
    0x17: "#REF!", # illegal or deleted cell reference
    0x1D: "#NAME?", # wrong function or range name
    0x24: "#NUM!", # value range overflow
    0x2A: "#N/A!" # argument or function not available
}
| apache-2.0 |
NeuralEnsemble/python-neo | neo/core/regionofinterest.py | 5 | 5411 | from math import floor, ceil
class RegionOfInterest:
    """Abstract base class for regions of interest."""
class CircularRegionOfInterest(RegionOfInterest):
    """Representation of a circular ROI

    *Usage:*

    >>> roi = CircularRegionOfInterest(20.0, 20.0, radius=5.0)
    >>> signal = image_sequence.signal_from_region(roi)

    *Required attributes/properties*:
        :x, y: (integers or floats)
            Pixel coordinates of the centre of the ROI
        :radius: (integer or float)
            Radius of the ROI in pixels
    """

    def __init__(self, x, y, radius):
        self.y = y
        self.x = x
        self.radius = radius

    @property
    def centre(self):
        """(x, y) pixel coordinates of the circle centre."""
        return (self.x, self.y)

    @property
    def center(self):
        """Alias of :attr:`centre` (American spelling)."""
        return self.centre

    def is_inside(self, x, y):
        """Return True if the point (x, y) lies within the circle,
        boundary included."""
        # Compare squared distances to avoid a square root.
        dx = x - self.x
        dy = y - self.y
        return dx * dx + dy * dy <= self.radius * self.radius

    def pixels_in_region(self):
        """Returns a list of pixels whose *centres* are within the circle"""
        pixels = []
        # Scan the bounding box of the circle; note the upper bound is
        # exclusive, so pixel centres exactly at x + radius are not scanned.
        for y in range(int(floor(self.y - self.radius)), int(ceil(self.y + self.radius))):
            for x in range(int(floor(self.x - self.radius)), int(ceil(self.x + self.radius))):
                if self.is_inside(x, y):
                    pixels.append([x, y])
        return pixels
class RectangularRegionOfInterest(RegionOfInterest):
    """Representation of a rectangular ROI

    *Usage:*

    >>> roi = RectangularRegionOfInterest(20.0, 20.0, width=5.0, height=5.0)
    >>> signal = image_sequence.signal_from_region(roi)

    *Required attributes/properties*:
        :x, y: (integers or floats)
            Pixel coordinates of the centre of the ROI
        :width: (integer or float)
            Width (x-direction) of the ROI in pixels
        :height: (integer or float)
            Height (y-direction) of the ROI in pixels
    """

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def is_inside(self, x, y):
        """Return True if the point (x, y) lies within the rectangle.

        The lower/left edges are included, the upper/right edges excluded
        (half-open intervals in both directions).
        """
        half_w = self.width / 2.0
        half_h = self.height / 2.0
        return (self.x - half_w <= x < self.x + half_w
                and self.y - half_h <= y < self.y + half_h)

    def pixels_in_region(self):
        """Returns a list of pixels whose *centres* are within the rectangle"""
        pixel_list = []
        h = self.height
        w = self.width
        # Scan the bounding box of the rectangle row by row.
        for y in range(int(floor(self.y - h / 2.0)), int(ceil(self.y + h / 2.0))):
            for x in range(int(floor(self.x - w / 2.0)), int(ceil(self.x + w / 2.0))):
                if self.is_inside(x, y):
                    pixel_list.append([x, y])
        return pixel_list
class PolygonRegionOfInterest(RegionOfInterest):
"""Representation of a polygonal ROI
*Usage:*
>>> roi = PolygonRegionOfInterest(
... (20.0, 20.0),
... (30.0, 20.0),
... (25.0, 25.0)
... )
>>> signal = image_sequence.signal_from_region(roi)
*Required attributes/properties*:
:vertices:
tuples containing the (x, y) coordinates, as integers or floats,
of the vertices of the polygon
"""
def __init__(self, *vertices):
self.vertices = vertices
def polygon_ray_casting(self, bounding_points, bounding_box_positions):
# from https://stackoverflow.com/questions/217578/how-can-i-determine-whether-a-2d-point-is-within-a-polygon
# user Noresourses
# Arrays containing the x- and y-coordinates of the polygon's vertices.
vertx = [point[0] for point in bounding_points]
verty = [point[1] for point in bounding_points]
# Number of vertices in the polygon
nvert = len(bounding_points)
# Points that are inside
points_inside = []
# For every candidate position within the bounding box
for idx, pos in enumerate(bounding_box_positions):
testx, testy = (pos[0], pos[1])
c = 0
for i in range(0, nvert):
j = i - 1 if i != 0 else nvert - 1
if (((verty[i]*1.0 > testy*1.0) != (verty[j]*1.0 > testy*1.0)) and
(testx*1.0 < (vertx[j]*1.0 - vertx[i]*1.0) * (testy*1.0 - verty[i]*1.0) /
(verty[j]*1.0 - verty[i]*1.0) + vertx[i]*1.0)):
c += 1
# If odd, that means that we are inside the polygon
if c % 2 == 1:
points_inside.append(pos)
return points_inside
def pixels_in_region(self):
min_x, max_x, min_y, max_y = (self.vertices[0][0], self.vertices[0][0],
self.vertices[0][1], self.vertices[0][1])
for i in self.vertices:
if i[0] < min_x:
min_x = i[0]
if i[0] > max_x:
max_x = i[0]
if i[1] < min_y:
min_y = i[1]
if i[1] > max_y:
max_y = i[1]
list_coord = []
for y in range(int(floor(min_y)), int(ceil(max_y))):
for x in range(int(floor(min_x)), int(ceil(max_x))):
list_coord.append((x, y))
pixel_list = self.polygon_ray_casting(self.vertices, list_coord)
return pixel_list
| bsd-3-clause |
AccaliaDeElementia/Spearmint | Comic.py | 1 | 4094 | #!/usr/bin/env python3
'''Spearmint Comic
Comic plugin for Spearmint
'''
import os
from time import mktime
from zipfile import is_zipfile, ZipFile
import mimetypes
import cherrypy
import Utils
mimetypes.init()
class Comic(object):
    '''Spearmint Comic Plugin Main Class

    Serves a directory tree of zip-packaged comic books through three
    cherrypy endpoints: `list` (directory listing), `info` (comic
    metadata) and `image` (a single page's bytes).
    '''
    def __init__(self, path=None):
        '''Create Comic Library rooted at `path` (default: current directory)'''
        if not path:
            path = './'
        self.prefix = path

    @staticmethod
    def _image_pages(comic):
        '''Return the image entries of an open ZipFile, sorted by filename.

        Shared by `info` and `image` so both endpoints agree on page
        ordering and therefore on page numbering.
        '''
        return Utils.sort_list([info for info in comic.infolist()
                                if Utils.is_image(info.filename)],
                               lambda x: x.filename.lower())

    @cherrypy.expose
    @Utils.jsonify
    def list(self, *args, **kwargs):
        '''Get Directory Listing for Comic Library

        Returns a dict with `dirs` (sub-directories) and `files`
        (zip archives) found under the requested path.
        Raises 404 if the path is not a directory.
        '''
        Utils.allow_methods(kwargs=kwargs)
        path = '/'.join(args)
        if not os.path.isdir(self.prefix + path):
            raise cherrypy.HTTPError(404, 'Requested Directory Not Exists')
        retval = {
            'dirs': [],
            'files': []
        }
        base = self.prefix + path
        for item in Utils.sort_list(os.listdir(base), lambda x: x.lower()):
            safepath = path + '/' + item
            filename = base + '/' + item
            if os.path.isdir(filename):
                retval['dirs'].append(safepath)
            elif is_zipfile(filename):
                # Only zip archives are served as comics; other files are hidden.
                retval['files'].append(safepath)
        return retval

    @cherrypy.expose
    @Utils.jsonify
    def info(self, *args, **kwargs):
        '''Get Info About a Comic

        Returns the comic's display name, archive comment and per-page
        metadata. Conditional-request validation (ETag / Last-Modified)
        is delegated to Utils.validate_conditional.
        '''
        Utils.allow_methods(kwargs=kwargs)
        if len(args) < 1:
            raise cherrypy.HTTPError(400, 'No Comic Path Requested')
        path = '/'.join(args)
        filename = self.prefix + path
        if not is_zipfile(filename):
            raise cherrypy.HTTPError(404, 'Requested Comic Not Exists')
        comic = ZipFile(filename)
        name = Utils.filename_to_name(filename)
        comment = str(comic.comment, 'utf8')
        pages = self._image_pages(comic)
        modified = Utils.date_to_httpdate(os.path.getmtime(filename))
        etag = Utils.make_etag(path, modified)
        Utils.validate_conditional(etag, modified)
        return {
            'name': name,
            'comment': comment,
            'pagecount': len(pages),
            'pages': [
                {
                    'filename': page.filename,
                    'name': Utils.filename_to_name(page.filename),
                    'comment': str(page.comment, 'utf8'),
                    'size': page.file_size
                }
                for page in pages
            ]
        }

    @cherrypy.expose
    def image(self, *args, **kwargs):
        '''Get Image From Comic

        `args` is the comic path followed by a 1-based page number;
        returns the raw image bytes with an appropriate Content-Type.
        '''
        Utils.allow_methods(kwargs=kwargs)
        if len(args) < 2:
            raise cherrypy.HTTPError(400, 'No Comic Path Requested')
        path = '/'.join(args[:-1])
        try:
            index = int(args[-1])
        except ValueError:
            raise cherrypy.HTTPError(400, 'No Comic Page Requested')
        index -= 1  # client page numbers are 1-based
        base = self.prefix + path
        if not is_zipfile(base):
            raise cherrypy.HTTPError(404, 'Requested Comic Not Exists')
        comic = ZipFile(base)
        pages = self._image_pages(comic)
        if index < 0 or index >= len(pages):
            raise cherrypy.HTTPError(404, 'Requested Comic Page Not Exists')
        page = pages[index]
        modified = Utils.date_to_httpdate(mktime(page.date_time + (0, 0, 0)))
        etag = Utils.make_etag(path, index, modified)
        Utils.validate_conditional(etag, modified)
        # guess_type() returns None for unknown extensions; fall back to a
        # generic binary type instead of emitting a null Content-Type header.
        content_type = mimetypes.guess_type(page.filename)[0]
        cherrypy.response.headers['Content-Type'] = content_type or 'application/octet-stream'
        return comic.read(page)
if __name__ == '__main__':
    # Standalone entry point: serve the comic library on all interfaces
    # using cherrypy's bundled HTTP server.
    app_config = {
        '/': {
            'tools.encode.on': True,
            'tools.gzip.on': True
        }
    }
    cherrypy.tree.mount(Comic(), '/', app_config)
    cherrypy.config.update({'server.socket_host': '0.0.0.0'})
    cherrypy.engine.start()
    cherrypy.engine.block()
| mit |
Stavitsky/nova | nova/tests/unit/network/test_rpcapi.py | 27 | 13204 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.network.rpcapi
"""
import collections
import contextlib
import mock
from mox3 import mox
from oslo_config import cfg
from nova import context
from nova.network import rpcapi as network_rpcapi
from nova import test
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class NetworkRpcAPITestCase(test.NoDBTestCase):
    """Unit tests for the client side of nova.network.rpcapi.

    Each test_* method delegates to _test_network_api, which records the
    expected RPC-client interaction with mox and then invokes the real
    NetworkAPI method, verifying topic, version, fanout/server targeting
    and forwarded keyword arguments.
    """
    def setUp(self):
        super(NetworkRpcAPITestCase, self).setUp()
        self.flags(multi_host=True)
    # Used to specify the default value expected if no real value is passed
    DefaultArg = collections.namedtuple('DefaultArg', ['value'])
    def _test_network_api(self, method, rpc_method, **kwargs):
        """Drive one NetworkAPI method and assert the RPC it produces.

        `method` is the NetworkAPI method name, `rpc_method` is 'call' or
        'cast'.  Special kwargs: `version` (expected cap), `fanout`, and
        DefaultArg-wrapped values (expected on the wire but not passed in).
        NOTE: the mox record/replay ordering below is load-bearing.
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = network_rpcapi.NetworkAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.network_topic)
        # 'call' returns the stubbed value; 'cast' returns None.
        expected_retval = 'foo' if rpc_method == 'call' else None
        expected_version = kwargs.pop('version', None)
        expected_fanout = kwargs.pop('fanout', None)
        expected_kwargs = kwargs.copy()
        # DefaultArg values are expected in the RPC payload but must NOT be
        # passed to the API method itself (they are its defaults).
        for k, v in expected_kwargs.items():
            if isinstance(v, self.DefaultArg):
                expected_kwargs[k] = v.value
                kwargs.pop(k)
        prepare_kwargs = {}
        if expected_version:
            prepare_kwargs['version'] = expected_version
        if expected_fanout:
            prepare_kwargs['fanout'] = True
        if 'source_compute' in expected_kwargs:
            # Fix up for migrate_instance_* calls.
            expected_kwargs['source'] = expected_kwargs.pop('source_compute')
            expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
        # Methods that are routed to a specific network host (server=...)
        # rather than being handled by any service on the topic.
        targeted_methods = [
            'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
            '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
            '_associate_floating_ip', '_disassociate_floating_ip',
            'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
            'migrate_instance_finish',
            'allocate_for_instance', 'deallocate_for_instance',
        ]
        targeted_by_instance = ['deallocate_for_instance']
        if method in targeted_methods and ('host' in expected_kwargs or
                'instance' in expected_kwargs):
            if method in targeted_by_instance:
                host = expected_kwargs['instance']['host']
            else:
                host = expected_kwargs['host']
            # These two keep 'host' in the payload as well as using it
            # for targeting; the others consume it for targeting only.
            if method not in ['allocate_for_instance',
                              'deallocate_fixed_ip']:
                expected_kwargs.pop('host')
            if CONF.multi_host:
                prepare_kwargs['server'] = host
        self.mox.StubOutWithMock(rpcapi, 'client')
        # Methods that probe the server's supported version before sending.
        version_check = [
            'deallocate_for_instance', 'deallocate_fixed_ip',
            'allocate_for_instance', 'release_fixed_ip',
        ]
        if method in version_check:
            rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
        if prepare_kwargs:
            rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
        rpc_method = getattr(rpcapi.client, rpc_method)
        rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
        self.mox.ReplayAll()
        # Invoke the real API method; mox verifies the recorded expectations.
        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
    def test_create_networks(self):
        self._test_network_api('create_networks', rpc_method='call',
                arg1='arg', arg2='arg')
    def test_delete_network(self):
        self._test_network_api('delete_network', rpc_method='call',
                uuid='fake_uuid', fixed_range='range')
    def test_allocate_for_instance(self):
        self._test_network_api('allocate_for_instance', rpc_method='call',
                instance_id='fake_id', project_id='fake_id', host='fake_host',
                rxtx_factor='fake_factor', vpn=False, requested_networks={},
                macs=[], version='1.13')
    def test_deallocate_for_instance(self):
        instance = fake_instance.fake_instance_obj(context.get_admin_context())
        self._test_network_api('deallocate_for_instance', rpc_method='call',
                requested_networks=self.DefaultArg(None), instance=instance,
                version='1.11')
    def test_deallocate_for_instance_with_expected_networks(self):
        instance = fake_instance.fake_instance_obj(context.get_admin_context())
        self._test_network_api('deallocate_for_instance', rpc_method='call',
                instance=instance, requested_networks={}, version='1.11')
    def test_add_fixed_ip_to_instance(self):
        self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
                instance_id='fake_id', rxtx_factor='fake_factor',
                host='fake_host', network_id='fake_id', version='1.9')
    def test_remove_fixed_ip_from_instance(self):
        self._test_network_api('remove_fixed_ip_from_instance',
                rpc_method='call', instance_id='fake_id',
                rxtx_factor='fake_factor', host='fake_host',
                address='fake_address', version='1.9')
    def test_add_network_to_project(self):
        self._test_network_api('add_network_to_project', rpc_method='call',
                project_id='fake_id', network_uuid='fake_uuid')
    def test_get_instance_nw_info(self):
        self._test_network_api('get_instance_nw_info', rpc_method='call',
                instance_id='fake_id', rxtx_factor='fake_factor',
                host='fake_host', project_id='fake_id', version='1.9')
    def test_validate_networks(self):
        self._test_network_api('validate_networks', rpc_method='call',
                networks={})
    def test_get_dns_domains(self):
        self._test_network_api('get_dns_domains', rpc_method='call')
    def test_add_dns_entry(self):
        self._test_network_api('add_dns_entry', rpc_method='call',
                address='addr', name='name', dns_type='foo', domain='domain')
    def test_modify_dns_entry(self):
        self._test_network_api('modify_dns_entry', rpc_method='call',
                address='addr', name='name', domain='domain')
    def test_delete_dns_entry(self):
        self._test_network_api('delete_dns_entry', rpc_method='call',
                name='name', domain='domain')
    def test_delete_dns_domain(self):
        self._test_network_api('delete_dns_domain', rpc_method='call',
                domain='fake_domain')
    def test_get_dns_entries_by_address(self):
        self._test_network_api('get_dns_entries_by_address', rpc_method='call',
                address='fake_address', domain='fake_domain')
    def test_get_dns_entries_by_name(self):
        self._test_network_api('get_dns_entries_by_name', rpc_method='call',
                name='fake_name', domain='fake_domain')
    def test_create_private_dns_domain(self):
        self._test_network_api('create_private_dns_domain', rpc_method='call',
                domain='fake_domain', av_zone='fake_zone')
    def test_create_public_dns_domain(self):
        self._test_network_api('create_public_dns_domain', rpc_method='call',
                domain='fake_domain', project='fake_project')
    def test_setup_networks_on_host(self):
        self._test_network_api('setup_networks_on_host', rpc_method='call',
                instance_id='fake_id', host='fake_host', teardown=False)
    def test_lease_fixed_ip(self):
        self._test_network_api('lease_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr')
    def test_release_fixed_ip(self):
        self._test_network_api('release_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr', mac='fake_mac',
                version='1.14')
    def test_release_fixed_ip_no_mac_support(self):
        # Tests that the mac kwarg is not passed when we can't send version
        # 1.14 to the network manager.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        address = '192.168.65.158'
        host = 'fake-host'
        mac = '00:0c:29:2c:b2:64'
        rpcapi = network_rpcapi.NetworkAPI()
        cast_mock = mock.Mock()
        cctxt_mock = mock.Mock(cast=cast_mock)
        with contextlib.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.release_fixed_ip(ctxt, address, host, mac)
        # assert our mocks were called as expected 232
        can_send_mock.assert_called_once_with('1.14')
        prepare_mock.assert_called_once_with(server=host, version='1.0')
        cast_mock.assert_called_once_with(ctxt, 'release_fixed_ip',
                                          address=address)
    def test_set_network_host(self):
        self._test_network_api('set_network_host', rpc_method='call',
                network_ref={})
    def test_rpc_setup_network_on_host(self):
        self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
                network_id='fake_id', teardown=False, host='fake_host')
    def test_rpc_allocate_fixed_ip(self):
        self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
                instance_id='fake_id', network_id='fake_id', address='addr',
                vpn=True, host='fake_host')
    def test_deallocate_fixed_ip(self):
        instance = fake_instance.fake_db_instance()
        self._test_network_api('deallocate_fixed_ip', rpc_method='call',
                address='fake_addr', host='fake_host', instance=instance,
                version='1.12')
    def test_update_dns(self):
        self._test_network_api('update_dns', rpc_method='cast', fanout=True,
                network_ids='fake_id', version='1.3')
    def test__associate_floating_ip(self):
        self._test_network_api('_associate_floating_ip', rpc_method='call',
                floating_address='fake_addr', fixed_address='fixed_address',
                interface='fake_interface', host='fake_host',
                instance_uuid='fake_uuid', version='1.6')
    def test__disassociate_floating_ip(self):
        self._test_network_api('_disassociate_floating_ip', rpc_method='call',
                address='fake_addr', interface='fake_interface',
                host='fake_host', instance_uuid='fake_uuid', version='1.6')
    def test_migrate_instance_start(self):
        self._test_network_api('migrate_instance_start', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host=self.DefaultArg(None),
                version='1.2')
    def test_migrate_instance_start_multi_host(self):
        self._test_network_api('migrate_instance_start', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host='fake_host',
                version='1.2')
    def test_migrate_instance_finish(self):
        self._test_network_api('migrate_instance_finish', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host=self.DefaultArg(None),
                version='1.2')
    def test_migrate_instance_finish_multi_host(self):
        self._test_network_api('migrate_instance_finish', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host='fake_host',
                version='1.2')
| apache-2.0 |
ArtRand/signalAlign | scripts/hdp_pipeline.py | 2 | 13032 | #!/usr/bin/env python
"""Master pipeline script for generating trained HDPs for MinION signal data
Input: alignments using non-HDP model
Output: trained HDP and model
The objective of this pipeline is to:
1. use input alignments to make 'build alignment' for generating the initial HDP
2. generates the initial HDP
3. trains the HDP on MinION reads
4. outputs distributions for all kmers from the HDP
"""
import os
import sys
from argparse import ArgumentParser
from subprocess import check_call, Popen
from shutil import copyfile
def parse_args():
    """Build and parse the command line for the HDP training pipeline.

    Groups of options: build-alignment inputs (per-cytosine-variant
    alignment directories), initial-HDP model selection and lookup
    tables, fixed concentration parameters, Gamma-prior hyperparameters,
    Gibbs sampling controls, and the evaluation grid.
    Returns the parsed argparse.Namespace.
    """
    parser = ArgumentParser(description=__doc__)
    # build alignment
    parser.add_argument('--C_alignments', '-C', action='store',
                        dest='C_alns', required=False, type=str, default=None,
                        help="C files")
    parser.add_argument('--mC_alignments', '-mC', action='store',
                        dest='mC_alns', required=False, type=str, default=None,
                        help="mC files")
    parser.add_argument('--hmC_alignments', '-hmC', action='store',
                        dest='hmC_alns', required=False, type=str, default=None,
                        help="hmC files")
    parser.add_argument('--number_of_assignments', '-n', action='store', type=int, default=10000,
                        dest='max_assignments',
                        help='total number of assignments to collect FOR EACH GROUP')
    # initial HDP
    parser.add_argument('--build_alignment', action='store', type=str, default=None,
                        required=False, dest='build_alignment')
    parser.add_argument('--threshold', '-t', action='store', type=float, default=0.0, dest='threshold')
    parser.add_argument('--hdp_type', action='store', type=str, required=False, dest='hdp_type', default='Prior',
                        help="Build Hdp, specify type, options: "
                             "Prior, Fixed, twoWay. twoWay is a Prior-type model (recommended)")
    parser.add_argument('--template_model', '-tM', action='store', type=str, dest='template_lookup',
                        required=True, help="Input template lookup table")
    parser.add_argument('--complement_model', '-cM', action='store', type=str, dest='complement_lookup',
                        required=True, help="Input complement lookup table")
    # fixed concentration models
    parser.add_argument('--base_gamma', '-B', action='store', type=float, default=1.0, dest='base_gamma',
                        required=False)
    parser.add_argument('--middle_gamma', '-M', action='store', type=float, default=1.0, dest='middle_gamma',
                        required=False)
    parser.add_argument('--leaf_gamma', '-L', action='store', type=float, default=1.0, dest='leaf_gamma',
                        required=False)
    # gamma prior models
    parser.add_argument('--base_alpha', '-Ba', action='store', type=float, default=1.0, dest='base_alpha',
                        required=False)
    parser.add_argument('--base_beta', '-Bb', action='store', type=float, default=1.0, dest='base_beta',
                        required=False)
    parser.add_argument('--middle_alpha', '-Ma', action='store', type=float, default=1.0, dest='middle_alpha',
                        required=False)
    parser.add_argument('--middle_beta', '-Mb', action='store', type=float, default=1.0, dest='middle_beta',
                        required=False)
    parser.add_argument('--leaf_alpha', '-La', action='store', type=float, default=1.0, dest='leaf_alpha',
                        required=False)
    parser.add_argument('--leaf_beta', '-Lb', action='store', type=float, default=1.0, dest='leaf_beta',
                        required=False)
    # gibbs
    parser.add_argument('--samples', '-s', action='store', type=int, default=10000, dest='gibbs_samples')
    parser.add_argument('--thinning', '-th', action='store', type=int, default=100, dest='thinning')
    parser.add_argument('--verbose', action='store_true', default=False, dest='verbose')
    # sample grid
    parser.add_argument('--grid_start', action='store', type=float, default=30.0, dest='grid_start')
    parser.add_argument('--grid_end', action='store', type=float, default=90.0, dest='grid_end')
    parser.add_argument('--grid_length', action='store', type=int, default=1200, dest='grid_length')
    # NOTE(review): --out is concatenated directly with file names below,
    # so it must end with a path separator — confirm with callers.
    parser.add_argument('--out', '-o', action='store', type=str, required=True, dest='out')
    return parser.parse_args()
def get_set_of_hdp_types(request):
    """Return the integer codes for a family of two-level HDP models.

    `request` selects the Gamma-prior models ('prior') or the
    fixed-concentration models (anything else).  The comparison is
    case-insensitive so the 'Prior' spelling accepted by --hdp_type
    (see parse_args) selects the prior set as well.

    Codes follow the get_hdp_type / HDP_TYPES mapping: odd codes
    1, 3, 5, 7, 9 are prior-parameter models, even codes 0, 2, 4, 6, 8
    are fixed-parameter models.
    """
    if request.lower() == 'prior':
        return [1, 3, 5, 7, 9]
    else:
        # Bug fix: this list previously started at 1 (singleLevelPrior, a
        # prior-type model); the fixed-parameter models are the even codes.
        return [0, 2, 4, 6, 8]
def get_hdp_type(requested_type):
    """Translate an HDP model name into the integer code used by buildHdpUtil.

    Raises AssertionError if the name is not one of the known models.
    """
    code_by_name = dict([
        ("singleLevelFixed", 0),
        ("singleLevelPrior", 1),
        ("multisetFixed", 2),
        ("multisetPrior", 3),
        ("compFixed", 4),
        ("compPrior", 5),
        ("middleNtsFixed", 6),
        ("middleNtsPrior", 7),
        ("groupMultisetFixed", 8),
        ("groupMultisetPrior", 9),
        ("singleLevelPrior2", 10),
        ("multisetPrior2", 11),
        ("multisetPriorEcoli", 12),
        ("singleLevelPriorEcoli", 13),
    ])
    assert requested_type in code_by_name, \
        "Requested HDP type is invalid, got {}".format(requested_type)
    return code_by_name[requested_type]
def count_lines_in_build_alignment(build_alignment_path):
    """Count the assignment lines in a build-alignment file.

    Used to size the Gibbs-sampler burn-in when a pre-made build
    alignment is supplied.  The previous implementation used the
    Python 2-only file.xreadlines() method and never closed the file;
    this version is Python 3 compatible and closes the handle.
    """
    with open(build_alignment_path, 'r') as alignment_file:
        return sum(1 for _ in alignment_file)
def kmer_length_from_model(template_model_file, complement_model_file):
    """Read the kmer length from a pair of lookup-table model files.

    Each model file starts with a four-field header line whose last
    field is the kmer length.  The template and complement models must
    agree; the shared length is returned.

    :raises AssertionError: if a header is malformed or the two lengths differ.
    """
    def get_kmer_length(model_file):
        # The header is the first line; field index 3 holds the kmer length.
        # (The redundant explicit close inside the 'with' block was removed;
        # the context manager already closes the file.)
        with open(model_file, "r") as fH:
            line = fH.readline().split()
            assert len(line) == 4, "HdpPipeline ERROR: wrong header in model file {}".format(model_file)
            return int(line[3])

    template_kmer_length = get_kmer_length(template_model_file)
    complement_kmer_length = get_kmer_length(complement_model_file)
    assert template_kmer_length == complement_kmer_length
    return template_kmer_length
def get_initial_hdp_args(args, hdp_type):
    """Assemble the buildHdpUtil flags for the HDP concentration parameters.

    Fixed-concentration models (even codes 0, 2, 4, 6, 8) take gamma
    values directly; all other models take Gamma-prior hyperparameters.
    Returns the flag string (with a trailing space) to append to the
    buildHdpUtil command line.
    """
    if hdp_type in (0, 2, 4, 6, 8):
        # Fixed-parameter models: concentrations are given directly.
        assert None not in [args.base_gamma, args.leaf_gamma], \
            "ERROR: need to specify concentration parameters for type {}".format(hdp_type)
        if hdp_type == 0:
            return "-B {base} -L {leaf} ".format(base=args.base_gamma, leaf=args.leaf_gamma)
        assert args.middle_gamma is not None, "ERROR: need to specify middle concentration param"
        return "-B {base} -M {middle} -L {leaf} ".format(
            base=args.base_gamma, middle=args.middle_gamma, leaf=args.leaf_gamma)
    # Prior-type models: concentrations are drawn from Gamma(alpha, beta).
    assert None not in [args.base_alpha, args.base_beta, args.leaf_alpha, args.leaf_beta], \
        "ERROR: missing Gamma prior hyper parameters"
    if hdp_type == 1 or hdp_type == 10:
        return "-g {Ba} -r {Bb} -i {La} -u {Lb} ".format(Ba=args.base_alpha, Bb=args.base_beta,
                                                         La=args.leaf_alpha, Lb=args.leaf_beta)
    assert None not in [args.middle_alpha, args.middle_beta], "ERROR: need middle hyper parameters"
    return "-g {Ba} -r {Bb} -j {Ma} -y {Mb} -i {La} -u {Lb} ".format(
        Ba=args.base_alpha, Bb=args.base_beta, Ma=args.middle_alpha,
        Mb=args.middle_beta, La=args.leaf_alpha, Lb=args.leaf_beta)
# globals
# (name, integer code) pairs understood by buildHdpUtil -p.  The list
# alternates fixed-parameter (even codes) and prior-parameter (odd codes)
# models; the slices HDP_TYPES[::2] / HDP_TYPES[1::2] used by the pipeline
# below rely on this ordering.
HDP_TYPES = [
    ("singleLevelFixed", 0),
    ("singleLevelPrior", 1),
    ("multisetFixed", 2),
    ("multisetPrior", 3),
    ("compFixed", 4),
    ("compPrior", 5),
    ("middleNtsFixed", 6),
    ("middleNtsPrior", 7),
    ("groupMultisetFixed", 8),
    ("groupMultisetPrior", 9),
]
# Two-way cytosine models, selected with --hdp_type=cytosine2.
HDP_TYPES_2 = [
    ("singleLevelPrior2", 10),
    ("multisetPrior2", 11),
]
# E. coli models, selected with --hdp_type=ecoli.
HDP_TYPES_ECOLI = [
    ("multisetPriorEcoli", 12),
    ("singleLevelPriorEcoli", 13),
]
# Pipeline Script
# Stage 0: setup.  All outputs (and a log) go into --out; the script must be
# run from the directory containing the buildHdpUtil binary.
# NOTE(review): paths are built by plain string concatenation, so --out must
# end with a path separator — confirm with callers.
args = parse_args()  # parse arguments
working_directory = args.out  # this is the directory we will use for everything
assert os.path.isdir(working_directory), "ERROR: the working directory you specified doesn't exist."
pipeline_log = open(working_directory + "pipeline.log", 'a')
command_line = " ".join(sys.argv[:])
pipeline_log.write("[pipeline] Command Line: {}\n".format(command_line))
signalAlign_directory = "../../signalAlign/"
build_alignment_location = working_directory + "buildAlignment.tsv"
# Stage 1: obtain the build alignment — either generate it from the input
# alignment directories or copy a pre-made one supplied via --build_alignment.
if args.build_alignment is None:
    # build alignment
    build_alignment_command = "{sA}scripts/makeBuildAlignments.py -o={bA} -t={threshold} -n={nbAssignments} " \
                              "".format(sA=signalAlign_directory, C=args.C_alns, mC=args.mC_alns,
                                        threshold=args.threshold, hmC=args.hmC_alns, bA=build_alignment_location,
                                        nbAssignments=args.max_assignments)
    approx_total_build_assignments = 0  # keep track of about how many assignments we're going to get for gibbs burn in
    if args.C_alns is not None:  # add the alignments to the command
        build_alignment_command += "-C={C} ".format(C=args.C_alns)
        approx_total_build_assignments += args.max_assignments
    if args.mC_alns is not None:
        build_alignment_command += "-mC={mC} ".format(mC=args.mC_alns)
        approx_total_build_assignments += args.max_assignments
    if args.hmC_alns is not None:
        build_alignment_command += "-hmC={hmC} ".format(hmC=args.hmC_alns)
        approx_total_build_assignments += args.max_assignments
    pipeline_log.write("[pipeline] NOTICE: Making build alignment using files from:\n\t{C}\n\t{mC}\n\t{hmC}\n"
                       "".format(C=args.C_alns, mC=args.mC_alns, hmC=args.hmC_alns))
    pipeline_log.write("[pipeline] Command: {}\n".format(build_alignment_command))
    check_call(build_alignment_command.split(), stderr=pipeline_log, stdout=pipeline_log)
else:
    pipeline_log.write("[pipeline] NOTICE: using build alignment {}".format(args.build_alignment))
    assert os.path.isfile(args.build_alignment), "ERROR: Didn't find input BuildAlignment"
    copyfile(args.build_alignment, build_alignment_location)
    approx_total_build_assignments = count_lines_in_build_alignment(build_alignment_location)
# initial HDP
# Stage 2: build one initial HDP (template + complement) per selected model
# type, running all buildHdpUtil invocations in parallel.
assert (os.path.isfile(build_alignment_location)), "ERROR: Didn't find build alignment"
assert (os.path.exists("./buildHdpUtil")), "ERROR: Didn't find buildHdpUtil"
pipeline_log.write("[pipeline] NOTICE: Making initial HDP of type {}\n".format(args.hdp_type))
initial_hdp_build_out = open(working_directory + "build_initial_hdp.out", 'w')
initial_hdp_build_err = open(working_directory + "build_initial_hdp.err", 'w')
kmer_length = kmer_length_from_model(args.template_lookup, args.complement_lookup)
template_lookup_table = " -T" + args.template_lookup
complement_lookup_table = " -C" + args.complement_lookup
verbose_flag = "--verbose " if args.verbose is True else ""
build_commands = []
# Select which model family to build; 'Prior' takes the odd-coded models,
# anything else the even-coded (fixed) models (see HDP_TYPES).
if args.hdp_type == "cytosine2":
    hdp_types = HDP_TYPES_2
elif args.hdp_type == "ecoli":
    hdp_types = HDP_TYPES_ECOLI
else:
    hdp_types = HDP_TYPES[1::2] if args.hdp_type == "Prior" else HDP_TYPES[::2]
for hdp_type, i, in hdp_types:
    template_hdp_location = working_directory + "template." + hdp_type + ".nhdp"
    complement_hdp_location = working_directory + "complement." + hdp_type + ".nhdp"
    # Burn-in is scaled to ~32 Gibbs sweeps over the collected assignments.
    build_initial_hdp_command = "./buildHdpUtil {verbose}-p {hdpType} -v {tHdpLoc} -w {cHdpLoc} -l {buildAln} " \
                                "-a {kmerLength} -n {samples} -I {burnIn} -t {thin} -s {start} -e {end} " \
                                "-k {len}{tL}{cL} " \
                                "".format(hdpType=i, tHdpLoc=template_hdp_location,
                                          cHdpLoc=complement_hdp_location, buildAln=build_alignment_location,
                                          samples=args.gibbs_samples, burnIn=32 * approx_total_build_assignments,
                                          thin=args.thinning, start=args.grid_start, end=args.grid_end,
                                          len=args.grid_length, verbose=verbose_flag, tL=template_lookup_table,
                                          cL=complement_lookup_table, kmerLength=kmer_length)
    build_initial_hdp_command += get_initial_hdp_args(args=args, hdp_type=i)
    build_commands.append(build_initial_hdp_command)
    pipeline_log.write("[pipeline] Command: {}\n".format(build_initial_hdp_command))
# Launch all builds concurrently and wait for each to finish.
procs = [Popen(x.split(), stdout=initial_hdp_build_out, stderr=initial_hdp_build_err) for x in build_commands]
status = [p.wait() for p in procs]
initial_hdp_build_out.close()
initial_hdp_build_err.close()
pipeline_log.write("[pipeline] DONE.\n")
pipeline_log.close()
| mit |
pexip/pygobject | tests/test_ossig.py | 1 | 6285 | # -*- coding: utf-8 -*-
# Copyright 2017 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
import signal
import unittest
import threading
from contextlib import contextmanager
try:
from gi.repository import Gtk
except ImportError:
Gtk = None
from gi.repository import Gio, GLib
from gi._ossighelper import wakeup_on_signal, register_sigint_fallback
class TestOverridesWakeupOnAlarm(unittest.TestCase):
    """Verify that wakeup_on_signal() lets Python-level SIGALRM handlers
    interrupt GLib/Gtk main loops and dialogs."""
    @contextmanager
    def _run_with_timeout(self, timeout, abort_func):
        """Fail the test if the wrapped loop is still running after
        `timeout` ms; `abort_func` is called to break out of the loop."""
        failed = []
        def fail():
            abort_func()
            failed.append(1)
            return True
        fail_id = GLib.timeout_add(timeout, fail)
        try:
            yield
        finally:
            GLib.source_remove(fail_id)
        self.assertFalse(failed)
    def test_basic(self):
        # set_wakeup_fd(-1) returns the previous fd; -1 before and after
        # shows wakeup_on_signal() restored the original state.
        self.assertEqual(signal.set_wakeup_fd(-1), -1)
        with wakeup_on_signal():
            pass
        self.assertEqual(signal.set_wakeup_fd(-1), -1)
    def test_in_thread(self):
        # wakeup_on_signal() must be a no-op (not an error) off the main thread.
        failed = []
        def target():
            try:
                with wakeup_on_signal():
                    pass
            # NOTE(review): bare 'except:' also swallows BaseException;
            # 'except Exception:' would be safer here.
            except:
                failed.append(1)
        t = threading.Thread(target=target)
        t.start()
        t.join(5)
        self.assertFalse(failed)
    @unittest.skipIf(os.name == "nt", "not on Windows")
    def test_glib_mainloop(self):
        # The itimer fires SIGALRM shortly after the loop starts; the Python
        # handler must get a chance to run and quit the loop.
        loop = GLib.MainLoop()
        signal.signal(signal.SIGALRM, lambda *args: loop.quit())
        GLib.idle_add(signal.setitimer, signal.ITIMER_REAL, 0.001)
        with self._run_with_timeout(2000, loop.quit):
            loop.run()
    @unittest.skipIf(os.name == "nt", "not on Windows")
    def test_gio_application(self):
        app = Gio.Application()
        signal.signal(signal.SIGALRM, lambda *args: app.quit())
        GLib.idle_add(signal.setitimer, signal.ITIMER_REAL, 0.001)
        with self._run_with_timeout(2000, app.quit):
            app.hold()
            app.connect("activate", lambda *args: None)
            app.run()
    @unittest.skipIf(Gtk is None or os.name == "nt", "not on Windows")
    def test_gtk_main(self):
        signal.signal(signal.SIGALRM, lambda *args: Gtk.main_quit())
        GLib.idle_add(signal.setitimer, signal.ITIMER_REAL, 0.001)
        with self._run_with_timeout(2000, Gtk.main_quit):
            Gtk.main()
    @unittest.skipIf(Gtk is None or os.name == "nt", "not on Windows")
    def test_gtk_dialog_run(self):
        # Gtk.Dialog.run() spins a recursive main loop; the alarm handler
        # must still be able to destroy the dialog and return control.
        w = Gtk.Window()
        d = Gtk.Dialog(transient_for=w)
        signal.signal(signal.SIGALRM, lambda *args: d.destroy())
        GLib.idle_add(signal.setitimer, signal.ITIMER_REAL, 0.001)
        with self._run_with_timeout(2000, d.destroy):
            d.run()
class TestSigintFallback(unittest.TestCase):
    """Verify register_sigint_fallback()'s SIGINT handler management:
    install-once semantics, restoration, and no-op conditions."""
    def setUp(self):
        # Every test starts and must end with Python's default SIGINT handler.
        self.assertEqual(
            signal.getsignal(signal.SIGINT), signal.default_int_handler)
    def tearDown(self):
        self.assertEqual(
            signal.getsignal(signal.SIGINT), signal.default_int_handler)
    def test_replace_handler_and_restore_nested(self):
        # The outermost context installs the fallback handler; a nested
        # context reuses it; exiting restores the default handler.
        with register_sigint_fallback(lambda: None):
            new_handler = signal.getsignal(signal.SIGINT)
            self.assertNotEqual(new_handler, signal.default_int_handler)
            with register_sigint_fallback(lambda: None):
                self.assertTrue(signal.getsignal(signal.SIGINT) is new_handler)
        self.assertEqual(
            signal.getsignal(signal.SIGINT), signal.default_int_handler)
    def test_no_replace_if_not_default(self):
        # If user code already installed its own SIGINT handler, the
        # fallback must leave it untouched.
        new_handler = lambda *args: None
        signal.signal(signal.SIGINT, new_handler)
        try:
            with register_sigint_fallback(lambda: None):
                self.assertTrue(signal.getsignal(signal.SIGINT) is new_handler)
                with register_sigint_fallback(lambda: None):
                    self.assertTrue(
                        signal.getsignal(signal.SIGINT) is new_handler)
                self.assertTrue(signal.getsignal(signal.SIGINT) is new_handler)
        finally:
            signal.signal(signal.SIGINT, signal.default_int_handler)
    def test_noop_in_threads(self):
        # Off the main thread the context manager must do nothing and
        # must not raise.
        failed = []
        def target():
            try:
                with register_sigint_fallback(lambda: None):
                    with register_sigint_fallback(lambda: None):
                        self.assertTrue(
                            signal.getsignal(signal.SIGINT) is
                            signal.default_int_handler)
            # NOTE(review): bare 'except:' also swallows BaseException;
            # 'except Exception:' would be safer here.
            except:
                failed.append(1)
        t = threading.Thread(target=target)
        t.start()
        t.join(5)
        self.assertFalse(failed)
    @unittest.skipIf(os.name == "nt", "not on Windows")
    def test_no_replace_if_set_by_glib(self):
        id_ = GLib.unix_signal_add(
            GLib.PRIORITY_DEFAULT, signal.SIGINT, lambda *args: None)
        try:
            # signal.getsignal() doesn't pick up that unix_signal_add()
            # has changed the handler, but we should anyway.
            self.assertEqual(
                signal.getsignal(signal.SIGINT), signal.default_int_handler)
            with register_sigint_fallback(lambda: None):
                self.assertEqual(
                    signal.getsignal(signal.SIGINT),
                    signal.default_int_handler)
            self.assertEqual(
                signal.getsignal(signal.SIGINT), signal.default_int_handler)
        finally:
            GLib.source_remove(id_)
            # Reset to SIG_DFL first to clear GLib's low-level handler, then
            # reinstall Python's default handler for the next test.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGINT, signal.default_int_handler)
| lgpl-2.1 |
whereismyjetpack/ansible | lib/ansible/utils/module_docs_fragments/dimensiondata_wait.py | 192 | 1429 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# - Adam Friedman <tintoy@tintoy.io>
class ModuleDocFragment(object):
    # Dimension Data ("wait-for-completion" parameters) doc fragment.
    #
    # Shared documentation snippet pulled into Dimension Data modules via
    # Ansible's ``extends_documentation_fragment`` mechanism; this class only
    # carries the YAML below and is never instantiated.
    DOCUMENTATION = '''
options:
  wait:
    description:
      - Should we wait for the task to complete before moving onto the next.
    required: false
    default: false
  wait_time:
    description:
      - The maximum amount of time (in seconds) to wait for the task to complete.
      - Only applicable if I(wait=true).
    required: false
    default: 600
  wait_poll_interval:
    description:
      - The amount of time (in seconds) to wait between checks for task completion.
      - Only applicable if I(wait=true).
    required: false
    default: 2
'''
| gpl-3.0 |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/common/types/asset_policy.py | 1 | 2207 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.common.types import policy
from google.ads.googleads.v7.enums.types import policy_approval_status
from google.ads.googleads.v7.enums.types import policy_review_status
# Module-level protobuf descriptor: registers every message defined in this
# file (the manifest) under the google.ads.googleads.v7.common package.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.common',
    marshal='google.ads.googleads.v7',
    manifest={
        'AdAssetPolicySummary',
    },
)
class AdAssetPolicySummary(proto.Message):
    r"""Contains policy information for an asset inside an ad.

    Attributes:
        policy_topic_entries (Sequence[google.ads.googleads.v7.common.types.PolicyTopicEntry]):
            The list of policy findings for this asset.
        review_status (google.ads.googleads.v7.enums.types.PolicyReviewStatusEnum.PolicyReviewStatus):
            Where in the review process this asset.
        approval_status (google.ads.googleads.v7.enums.types.PolicyApprovalStatusEnum.PolicyApprovalStatus):
            The overall approval status of this asset,
            which is calculated based on the status of its
            individual policy topic entries.
    """

    # Generated code: field numbers (1-3) must match the .proto definition.
    policy_topic_entries = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=policy.PolicyTopicEntry,
    )
    review_status = proto.Field(
        proto.ENUM,
        number=2,
        enum=policy_review_status.PolicyReviewStatusEnum.PolicyReviewStatus,
    )
    approval_status = proto.Field(
        proto.ENUM,
        number=3,
        enum=policy_approval_status.PolicyApprovalStatusEnum.PolicyApprovalStatus,
    )
# Public API of this module, derived from the protobuf manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
sysadminmatmoz/OCB | addons/base_gengo/wizard/base_gengo_translations.py | 45 | 12183 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
import logging
import re
import time
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
try:
from gengo import Gengo
except ImportError:
_logger.warning('Gengo library not found, Gengo features disabled. If you plan to use it, please install the gengo library from http://pypi.python.org/pypi/gengo')
GENGO_DEFAULT_LIMIT = 20
class base_gengo_translations(osv.osv_memory):
    """Wizard synchronizing translatable terms between Odoo's ir.translation
    records and the Gengo translation service (send new terms, receive
    finished translations, or both)."""

    # ir.config_parameter key storing a per-database UUID; Gengo echoes it
    # back in the callback URL so incoming responses can be authenticated.
    GENGO_KEY = "Gengo.UUID"
    # Groups allowed to read the key parameter.
    GROUPS = ['base.group_system']

    _name = 'base.gengo.translations'
    _columns = {
        'sync_type': fields.selection([('send', 'Send New Terms'),
                                       ('receive', 'Receive Translation'),
                                       ('both', 'Both')], "Sync Type", required=True),
        'lang_id': fields.many2one('res.lang', 'Language', required=True),
        'sync_limit': fields.integer("No. of terms to sync"),
    }
    _defaults = {
        'sync_type': 'both',
        'sync_limit': 20
    }

    def init(self, cr):
        # Generate the database's Gengo UUID once, at module installation.
        icp = self.pool['ir.config_parameter']
        if not icp.get_param(cr, SUPERUSER_ID, self.GENGO_KEY, default=None):
            icp.set_param(cr, SUPERUSER_ID, self.GENGO_KEY, str(uuid.uuid4()), groups=self.GROUPS)

    def get_gengo_key(self, cr):
        # Read the UUID; "Undefined" if init() has never run.
        icp = self.pool['ir.config_parameter']
        return icp.get_param(cr, SUPERUSER_ID, self.GENGO_KEY, default="Undefined")

    def gengo_authentication(self, cr, uid, context=None):
        '''
        This method tries to open a connection with Gengo. For that, it uses the Public and Private
        keys that are linked to the company (given by Gengo on subscription). It returns a tuple with
        * as first element: a boolean depicting if the authentication was a success or not
        * as second element: the connection, if it was a success, or the error message returned by
          Gengo when the connection failed.
        This error message can either be displayed in the server logs (if the authentication was called
        by the cron) or in a dialog box (if requested by the user), thus it's important to return it
        translated.
        '''
        user = self.pool.get('res.users').browse(cr, 1, uid, context=context)
        if not user.company_id.gengo_public_key or not user.company_id.gengo_private_key:
            return (False, _("Gengo `Public Key` or `Private Key` are missing. Enter your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`."))
        try:
            gengo = Gengo(
                public_key=user.company_id.gengo_public_key.encode('ascii'),
                private_key=user.company_id.gengo_private_key.encode('ascii'),
                sandbox=user.company_id.gengo_sandbox,
            )
            # Cheap API call used purely to validate the credentials.
            gengo.getAccountStats()
            return (True, gengo)
        except Exception, e:
            _logger.exception('Gengo connection failed')
            return (False, _("Gengo connection failed with this message:\n``%s``") % e)

    def act_update(self, cr, uid, ids, context=None):
        '''
        Function called by the wizard: validates the language and the sync
        limit, then triggers the requested send/receive synchronization.
        '''
        if context is None:
            context = {}
        flag, gengo = self.gengo_authentication(cr, uid, context=context)
        if not flag:
            raise UserError(gengo)
        for wizard in self.browse(cr, uid, ids, context=context):
            supported_langs = self.pool.get('ir.translation')._get_all_supported_languages(cr, uid, context=context)
            language = self.pool.get('ir.translation')._get_gengo_corresponding_language(wizard.lang_id.code)
            if language not in supported_langs:
                raise UserError(_('This language is not supported by the Gengo translation services.'))
            ctx = context.copy()
            # Restrict the sync to the wizard's language (see _sync_request).
            ctx['gengo_language'] = wizard.lang_id.id
            if wizard.sync_limit > 200 or wizard.sync_limit < 1:
                raise UserError(_('The number of terms to sync should be between 1 to 200 to work with Gengo translation services.'))
            if wizard.sync_type in ['send', 'both']:
                self._sync_request(cr, uid, wizard.sync_limit, context=ctx)
            if wizard.sync_type in ['receive', 'both']:
                self._sync_response(cr, uid, wizard.sync_limit, context=ctx)
        return {'type': 'ir.actions.act_window_close'}

    def _sync_response(self, cr, uid, limit=GENGO_DEFAULT_LIMIT, context=None):
        """
        This method will be called by cron services to get translations from
        Gengo. It will read translated terms and comments from Gengo and will
        update respective ir.translation in Odoo.
        """
        translation_pool = self.pool.get('ir.translation')
        flag, gengo = self.gengo_authentication(cr, uid, context=context)
        if not flag:
            _logger.warning("%s", gengo)
        else:
            # Page through in-progress terms, ``limit`` at a time.
            offset = 0
            all_translation_ids = translation_pool.search(cr, uid, [('state', '=', 'inprogress'), ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')), ('order_id', "!=", False)], context=context)
            while True:
                translation_ids = all_translation_ids[offset:offset + limit]
                offset += limit
                if not translation_ids:
                    break
                # Collect, for this page, the Gengo order ids and the
                # ir.translation ids (as strings, to match custom_data).
                terms_progress = {
                    'gengo_order_ids': set(),
                    'ir_translation_ids': set(),
                }
                translation_terms = translation_pool.browse(cr, uid, translation_ids, context=context)
                for term in translation_terms:
                    terms_progress['gengo_order_ids'].add(term.order_id)
                    terms_progress['ir_translation_ids'].add(tools.ustr(term.id))
                for order_id in terms_progress['gengo_order_ids']:
                    order_response = gengo.getTranslationOrderJobs(id=order_id)
                    jobs_approved = order_response.get('response', []).get('order', []).get('jobs_approved', [])
                    gengo_ids = ','.join(jobs_approved)
                    if gengo_ids:  # Need to check, because getTranslationJobBatch don't catch this case and so call the getTranslationJobs because no ids in url
                        try:
                            job_response = gengo.getTranslationJobBatch(id=gengo_ids)
                        except:
                            # Best-effort: skip this order on any API error.
                            continue
                        if job_response['opstat'] == 'ok':
                            for job in job_response['response'].get('jobs', []):
                                if job.get('custom_data') in terms_progress['ir_translation_ids']:
                                    self._update_terms_job(cr, uid, job, context=context)
        return True

    def _update_terms_job(self, cr, uid, job, context=None):
        # Map one Gengo job status/payload onto its ir.translation record.
        translation_pool = self.pool.get('ir.translation')
        tid = int(job['custom_data'])  # custom_data carries the ir.translation id
        vals = {}
        if job.get('status', False) in ('queued', 'available', 'pending', 'reviewable'):
            vals['state'] = 'inprogress'
        if job.get('body_tgt', False) and job.get('status', False) == 'approved':
            vals['value'] = job['body_tgt']
        if job.get('status', False) in ('approved', 'canceled'):
            vals['state'] = 'translated'
        if vals:
            translation_pool.write(cr, uid, [tid], vals, context=context)

    def _update_terms(self, cr, uid, response, term_ids, context=None):
        """
        Update the terms after their translation were requested to Gengo
        """
        translation_pool = self.pool.get('ir.translation')
        vals = {
            'order_id': response.get('order_id', ''),
            'state': 'inprogress'
        }
        translation_pool.write(cr, uid, term_ids, vals, context=context)
        # Some jobs may already carry a result (e.g. machine translations).
        jobs = response.get('jobs', [])
        if jobs:
            for t_id, res in jobs.items():
                self._update_terms_job(cr, uid, res, context=context)
        return

    def pack_jobs_request(self, cr, uid, term_ids, context=None):
        ''' prepare the terms that will be requested to gengo and returns them in a dictionary with following format
            {'jobs': {
                'term1.id': {...}
                'term2.id': {...}
                }
            }'''
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        translation_pool = self.pool.get('ir.translation')
        jobs = {}
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        auto_approve = 1 if user.company_id.gengo_auto_approve else 0
        for term in translation_pool.browse(cr, uid, term_ids, context=context):
            # Skip terms with no word characters (nothing to translate).
            if re.search(r"\w", term.src or ""):
                comment = user.company_id.gengo_comment or ''
                if term.gengo_comment:
                    comment += '\n' + term.gengo_comment
                # Key is made unique with a timestamp prefix; the real
                # ir.translation id travels in custom_data.
                jobs[time.strftime('%Y%m%d%H%M%S') + '-' + str(term.id)] = {
                    'type': 'text',
                    'slug': 'Single :: English to ' + term.lang,
                    'tier': tools.ustr(term.gengo_translation),
                    'custom_data': str(term.id),
                    'body_src': term.src,
                    'lc_src': 'en',
                    'lc_tgt': translation_pool._get_gengo_corresponding_language(term.lang),
                    'auto_approve': auto_approve,
                    'comment': comment,
                    'callback_url': "%s/website/gengo_callback?pgk=%s&db=%s" % (base_url, self.get_gengo_key(cr), cr.dbname)
                }
        return {'jobs': jobs, 'as_group': 0}

    def _send_translation_terms(self, cr, uid, term_ids, context=None):
        """
        Send a request to Gengo with all the term_ids in a different job, get the response and update the terms in
        database accordingly.
        """
        flag, gengo = self.gengo_authentication(cr, uid, context=context)
        if flag:
            request = self.pack_jobs_request(cr, uid, term_ids, context=context)
            if request['jobs']:
                result = gengo.postTranslationJobs(jobs=request)
                if result['opstat'] == 'ok':
                    self._update_terms(cr, uid, result['response'], term_ids, context=context)
        else:
            _logger.error(gengo)
        return True

    def _sync_request(self, cr, uid, limit=GENGO_DEFAULT_LIMIT, context=None):
        """
        This scheduler will send a job request to the gengo , which terms are
        waiting to be translated and for which gengo_translation is enabled.

        A special key 'gengo_language' can be passed in the context in order to
        request only translations of that language only. Its value is the language
        ID in Odoo.
        """
        if context is None:
            context = {}
        language_pool = self.pool.get('res.lang')
        translation_pool = self.pool.get('ir.translation')
        domain = [('state', '=', 'to_translate'), ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')), ('order_id', "=", False)]
        if context.get('gengo_language', False):
            lc = language_pool.browse(cr, uid, context['gengo_language'], context=context).code
            domain.append(('lang', '=', lc))
        all_term_ids = translation_pool.search(cr, uid, domain, context=context)
        try:
            offset = 0
            while True:
                #search for the n first terms to translate
                term_ids = all_term_ids[offset:offset + limit]
                if term_ids:
                    offset += limit
                    self._send_translation_terms(cr, uid, term_ids, context=context)
                    _logger.info("%s Translation terms have been posted to Gengo successfully", len(term_ids))
                if not len(term_ids) == limit:
                    break
        except Exception, e:
            _logger.error("%s", e)
| agpl-3.0 |
jgeskens/django | tests/special_headers/tests.py | 4 | 2696 | from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SpecialHeadersTest(TestCase):
    """The X-Object-Type / X-View debugging headers must be exposed only to
    logged-in, *active* staff users."""
    fixtures = ['data.xml']
    urls = 'special_headers.urls'

    def _check_header_visibility(self, url, header, method='get'):
        """Hit *url* as anonymous / staff / non-staff / inactive-staff and
        assert *header* is present only for the active staff user.

        Returns the staff-user response so callers can assert on the
        header's value.
        """
        user = User.objects.get(username='super')
        request = getattr(self.client, method)
        # Anonymous user: header hidden.
        self.assertNotIn(header, request(url))
        # Active staff user: header shown.
        self.client.login(username='super', password='secret')
        staff_response = request(url)
        self.assertIn(header, staff_response)
        # Logged in but no longer staff: header hidden.
        user.is_staff = False
        user.save()
        self.assertNotIn(header, request(url))
        # Staff but inactive: header hidden.
        user.is_staff = True
        user.is_active = False
        user.save()
        self.assertNotIn(header, request(url))
        return staff_response

    def test_xheaders(self):
        self._check_header_visibility(
            '/special_headers/article/1/', 'X-Object-Type')

    def test_xview_func(self):
        response = self._check_header_visibility(
            '/special_headers/xview/func/', 'X-View', method='head')
        self.assertEqual(response['X-View'], 'special_headers.views.xview')

    def test_xview_class(self):
        response = self._check_header_visibility(
            '/special_headers/xview/class/', 'X-View', method='head')
        self.assertEqual(response['X-View'], 'special_headers.views.XViewClass')
| bsd-3-clause |
jamesblunt/scrapy | scrapy/contracts/__init__.py | 159 | 5416 | import sys
import re
from functools import wraps
from unittest import TestCase
from scrapy.http import Request
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.python import get_spec
class ContractsManager(object):
    """Builds and runs spider-contract test requests.

    Contracts are declared as ``@name args`` lines in a callback's
    docstring; the manager turns each annotated callback into a
    self-testing Request.
    """

    # Kept for backward compatibility with code reading the class attribute;
    # instances get their own dict (see __init__).
    contracts = {}

    def __init__(self, contracts):
        # BUG FIX: the previous implementation populated the *class-level*
        # dict above, so every ContractsManager instance shared (and leaked)
        # each other's registered contracts.
        self.contracts = {}
        for contract in contracts:
            self.contracts[contract.name] = contract

    def tested_methods_from_spidercls(self, spidercls):
        """Return the names of spider methods whose docstring contains at
        least one ``@contract`` annotation line."""
        methods = []
        for key, value in vars(spidercls).items():
            if (callable(value) and value.__doc__ and
                    re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
                methods.append(key)
        return methods

    def extract_contracts(self, method):
        """Instantiate one registered contract per ``@name args...``
        docstring line of *method*."""
        contracts = []
        for line in method.__doc__.split('\n'):
            line = line.strip()
            if line.startswith('@'):
                name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
                args = re.split(r'\s+', args)
                contracts.append(self.contracts[name](method, *args))
        return contracts

    def from_spider(self, spider, results):
        """Build one contract-driven Request per annotated spider method."""
        requests = []
        for method in self.tested_methods_from_spidercls(type(spider)):
            bound_method = spider.__getattribute__(method)
            requests.append(self.from_method(bound_method, results))
        return requests

    def from_method(self, method, results):
        """Build a Request for *method*, decorated with its contracts'
        pre/post hooks; returns None when the method has no contracts or
        the contracts don't provide all mandatory Request arguments."""
        contracts = self.extract_contracts(method)
        if contracts:
            # calculate request args
            args, kwargs = get_spec(Request.__init__)
            kwargs['callback'] = method
            for contract in contracts:
                kwargs = contract.adjust_request_args(kwargs)

            # create and prepare request
            args.remove('self')
            if set(args).issubset(set(kwargs)):
                request = Request(**kwargs)

                # execute pre and post hooks in order
                for contract in reversed(contracts):
                    request = contract.add_pre_hook(request, results)
                for contract in contracts:
                    request = contract.add_post_hook(request, results)

                self._clean_req(request, method, results)
                return request

    def _clean_req(self, request, method, results):
        """ stop the request from returning objects and records any errors """

        cb = request.callback

        @wraps(cb)
        def cb_wrapper(response):
            try:
                output = cb(response)
                output = list(iterate_spider_output(output))
            except Exception:  # record any callback failure as a test error
                case = _create_testcase(method, 'callback')
                results.addError(case, sys.exc_info())

        def eb_wrapper(failure):
            case = _create_testcase(method, 'errback')
            # BUG FIX: TestResult.addError() expects a (type, value,
            # traceback) triple, in sys.exc_info() order; the previous code
            # passed (value, type, traceback), corrupting the error report.
            exc_info = failure.type, failure.value, failure.getTracebackObject()
            results.addError(case, exc_info)

        request.callback = cb_wrapper
        request.errback = eb_wrapper
class Contract(object):
    """ Abstract class for contracts """

    def __init__(self, method, *args):
        # One synthetic TestCase per hook so results are reported separately.
        self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
        self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
        self.args = args

    def add_pre_hook(self, request, results):
        # Wrap the callback so pre_process() runs on the response before it.
        if hasattr(self, 'pre_process'):
            cb = request.callback

            @wraps(cb)
            def wrapper(response):
                try:
                    results.startTest(self.testcase_pre)
                    self.pre_process(response)
                    results.stopTest(self.testcase_pre)
                except AssertionError:
                    results.addFailure(self.testcase_pre, sys.exc_info())
                except Exception:
                    results.addError(self.testcase_pre, sys.exc_info())
                else:
                    results.addSuccess(self.testcase_pre)
                finally:
                    # NOTE: returning from ``finally`` swallows any exception
                    # raised above, so the original callback always runs even
                    # when the hook itself failed.
                    return list(iterate_spider_output(cb(response)))

            request.callback = wrapper

        return request

    def add_post_hook(self, request, results):
        # Wrap the callback so post_process() runs on its collected output.
        if hasattr(self, 'post_process'):
            cb = request.callback

            @wraps(cb)
            def wrapper(response):
                output = list(iterate_spider_output(cb(response)))
                try:
                    results.startTest(self.testcase_post)
                    self.post_process(output)
                    results.stopTest(self.testcase_post)
                except AssertionError:
                    results.addFailure(self.testcase_post, sys.exc_info())
                except Exception:
                    results.addError(self.testcase_post, sys.exc_info())
                else:
                    results.addSuccess(self.testcase_post)
                finally:
                    # Same ``finally: return`` pattern as add_pre_hook: hook
                    # failures are recorded but never propagate.
                    return output

            request.callback = wrapper

        return request

    def adjust_request_args(self, args):
        # Subclasses may override to inject Request kwargs (e.g. the url).
        return args
def _create_testcase(method, desc):
spider = method.__self__.name
class ContractTestCase(TestCase):
def __str__(_self):
return "[%s] %s (%s)" % (spider, method.__name__, desc)
name = '%s_%s' % (spider, method.__name__)
setattr(ContractTestCase, name, lambda x: x)
return ContractTestCase(name)
| bsd-3-clause |
qbilius/streams | streams/utils.py | 1 | 13227 | import functools
import numpy as np
import scipy.stats
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
def splithalf(data, aggfunc=np.nanmean, rng=None):
    """Randomly split the rows of *data* into two equal halves and aggregate
    each half along axis 0 with *aggfunc* (one row is dropped when the row
    count is odd)."""
    arr = np.array(data)
    rng = np.random.RandomState(None) if rng is None else rng
    order = list(range(arr.shape[0]))
    rng.shuffle(order)
    half = len(order) // 2
    first, second = order[:half], order[half:2 * half]
    return aggfunc(arr[first], axis=0), aggfunc(arr[second], axis=0)
def pearsonr_matrix(data1, data2, axis=1):
    """Pearson correlation between matching slices of two arrays, one value
    per position along *axis*."""
    rs = [scipy.stats.pearsonr(np.take(data1, i, axis=axis),
                               np.take(data2, i, axis=axis))[0]
          for i in range(data1.shape[axis])]
    return np.array(rs)
def spearman_brown_correct(pearsonr, n=2):
    """Spearman-Brown prophecy: predicted reliability of a test lengthened
    *n*-fold, given the observed correlation(s) *pearsonr*."""
    r = np.array(pearsonr)
    return (n * r) / (1 + (n - 1) * r)
def resample(data, rng=None):
    """Bootstrap resample: draw len(data) rows with replacement."""
    arr = np.array(data)
    if rng is None:
        rng = np.random.RandomState(None)
    n = arr.shape[0]
    picks = rng.choice(range(n), size=n, replace=True)
    return arr[picks]
def bootstrap_resample(data, func=np.mean, niter=100, ci=95, rng=None):
    """Bootstrap the sampling distribution of func(data).

    Returns the (lower, upper) percentile bounds of the *ci*% interval, or
    the raw list of *niter* bootstrap statistics when *ci* is None.
    """
    stats = [func(resample(data, rng=rng)) for _ in range(niter)]
    if ci is None:
        return stats
    lo = np.percentile(stats, 50 - ci / 2.)
    hi = np.percentile(stats, 50 + ci / 2.)
    return lo, hi
def _timeplot_bootstrap(x, estimator=np.mean, ci=95, n_boot=100):
    """Bootstrap *x* and return its CI bounds as a Series with keys
    ``emin``/``emax`` (the format timeplot() expects to unstack)."""
    lo, hi = bootstrap_resample(x, func=estimator, ci=ci, niter=n_boot)
    return pandas.Series({'emin': lo, 'emax': hi})
def timeplot(data=None, x=None, y=None, hue=None,
             estimator=np.mean, ci=95, n_boot=100,
             col=None, row=None, sharex=None, sharey=None,
             legend_loc='lower right', **fig_kwargs):
    """Faceted line plot of estimator(y) vs. x with bootstrapped error bands.

    Works like a lightweight seaborn FacetGrid: *hue* selects line color
    within each axis, *row*/*col* name columns of *data* used to lay out the
    subplot grid, and *sharex*/*sharey* may be 'row', 'col', 'both' or an
    explicit (min, max) pair.  Returns the 2-D array of axes.

    NOTE: mutates *data* by adding synthetic row/col columns when *row* or
    *col* is None.
    """
    if hue is None:
        hues = ['']
    else:
        hues = data[hue].unique()
        if data[hue].dtype.name == 'category': hues = hues.sort_values()

    # plt.figure()
    # --- synthesize a dummy single-valued row/col column when not given, so
    # the grid logic below can treat all four cases uniformly.  The while
    # loops pick a column name ('row_0', 'row_1', ...) not already in data.
    if row is None:
        row_orig = None
        tmp = 'row_{}'
        i = 0
        row = tmp.format(i)
        while row in data:
            i += 1
            row = tmp.format(i)
        data[row] = 'row'
    else:
        row_orig = row
    if col is None:
        col_orig = None
        tmp = 'col_{}'
        i = 0
        col = tmp.format(i)
        while col in data:
            i += 1
            col = tmp.format(i)
        data[col] = 'col'
    else:
        col_orig = col

    if row is not None:
        rows = data[row].unique()
        if data[row].dtype.name == 'category': rows = rows.sort_values()
    else:
        rows = [(None, None)]
    if col is not None:
        cols = data[col].unique()
        if data[col].dtype.name == 'category': cols = cols.sort_values()
    else:
        cols = [(None, None)]

    fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), **fig_kwargs)
    # Normalize axes to a 2-D array regardless of the grid shape.
    if hasattr(axes, 'shape'):
        axes = axes.reshape([len(rows), len(cols)])
    else:
        axes = np.array([[axes]])

    # Per-facet data ranges, then widened according to sharex/sharey.
    xlim = data.groupby([row, col])[x].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
    ylim = data.groupby([row, col])[y].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
    if sharex == 'row':
        for r in rows:
            xlim.loc[r, 'amin'] = xlim.loc[r, 'amin'].min()
            xlim.loc[r, 'amax'] = xlim.loc[r, 'amax'].max()
    elif sharex == 'col':
        for c in cols:
            xlim.loc[(slice(None), c), 'amin'] = xlim.loc[(slice(None), c), 'amin'].min()
            xlim.loc[(slice(None), c), 'amax'] = xlim.loc[(slice(None), c), 'amax'].max()
    elif sharex == 'both':
        # NOTE(review): 'amax' is reduced with .min() here (and in the
        # sharey == 'both' branch below) -- looks like it should be .max().
        xlim.loc[:, 'amin'] = xlim.loc[:, 'amin'].min()
        xlim.loc[:, 'amax'] = xlim.loc[:, 'amax'].min()
    elif isinstance(sharex, (tuple, list)):
        xlim.loc[:, 'amin'] = sharex[0]
        xlim.loc[:, 'amax'] = sharex[1]
    if sharey == 'row':
        for r in rows:
            ylim.loc[r, 'amin'] = ylim.loc[r, 'amin'].min()
            ylim.loc[r, 'amax'] = ylim.loc[r, 'amax'].max()
    elif sharey == 'col':
        for c in cols:
            ylim.loc[(slice(None), c), 'amin'] = ylim.loc[(slice(None), c), 'amin'].min()
            ylim.loc[(slice(None), c), 'amax'] = ylim.loc[(slice(None), c), 'amax'].max()
    elif sharey == 'both':
        # NOTE(review): same suspected .min()/.max() mix-up as sharex above.
        ylim.loc[:, 'amin'] = ylim.loc[:, 'amin'].min()
        ylim.loc[:, 'amax'] = ylim.loc[:, 'amax'].min()
    elif isinstance(sharey, (tuple, list)):
        ylim.loc[:, 'amin'] = sharey[0]
        ylim.loc[:, 'amax'] = sharey[1]

    for rno, r in enumerate(rows):
        for cno, c in enumerate(cols):
            ax = axes[rno,cno]
            for h, color in zip(hues, sns.color_palette(n_colors=len(hues))):
                # Select this facet's (and hue's) slice of the data.
                if hue is None:
                    d = data
                else:
                    d = data[data[hue] == h]
                sel_col = d[col] == c if col is not None else True
                sel_row = d[row] == r if row is not None else True
                if not (col is None and row is None):
                    d = d[sel_row & sel_col]
                # if c == 'hvm_test': import ipdb; ipdb.set_trace()
                if len(d) > 0:
                    # Central line: estimator(y) per x value.
                    mn = d.groupby(x)[y].apply(estimator)
                    def bootstrap(x):
                        # First try without NaNs; fall back to the raw data
                        # if the notnull() filter itself fails.
                        try:
                            y = _timeplot_bootstrap(x[x.notnull()], estimator, ci, n_boot)
                        except:
                            y = _timeplot_bootstrap(x, estimator, ci, n_boot)
                        return y
                    if n_boot > 0:
                        ebars = d.groupby(x)[y].apply(bootstrap).unstack()
                        ax.fill_between(mn.index, ebars.emin, ebars.emax, alpha=.5, color=color)
                    ax.plot(mn.index, mn, linewidth=2, color=color, label=h)
                else:
                    # Nothing to draw in this facet.
                    ax.set_visible(False)
            # Apply the (possibly shared) limits; tolerate missing entries.
            try:
                ax.set_xlim([xlim.loc[(r, c), 'amin'], xlim.loc[(r, c), 'amax']])
            except:
                pass
            try:
                ax.set_ylim([ylim.loc[(r, c), 'amin'], ylim.loc[(r, c), 'amax']])
            except:
                pass
            # Outer-edge labels and facet titles only where meaningful.
            if ax.is_last_row():
                ax.set_xlabel(x)
            if ax.is_first_col():
                ax.set_ylabel(y)
            if row_orig is None:
                if col_orig is None:
                    ax.set_title('')
                else:
                    ax.set_title('{} = {}'.format(col_orig, c))
            else:
                if col_orig is None:
                    ax.set_title('{} = {}'.format(row_orig, r))
                else:
                    ax.set_title('{} = {} | {} = {}'.format(row_orig, r, col_orig, c))

    if hue is not None:
        plt.legend(loc=legend_loc, framealpha=.25)
    plt.tight_layout()
    return axes
def clean_data(df, std_thres=3, stim_dur_thres=1000./120):
    """
    Remove outliers from behavioral data

    What is removed:
    - If response time is more than `std_thres` standard deviations above
      the mean response time to all stimuli (default: 3)
    - If the recorded stimulus duration differs by more than `stim_dur_thres`
      from the requested stimulus duration (default: half a frame for 60 Hz)

    :Args:
        df - pandas.DataFrame
    :Kwargs:
        - std_thres (float, default: 3)
        - stim_dur_thres (float, default: 1000./120)
    :Returns:
        pandas.DataFrame that has the outliers removed (not nanned)
    """
    # BUG FIX: the threshold was hard-coded to 3, silently ignoring the
    # std_thres parameter; the default behavior is unchanged.
    fast_rts = np.abs(df.rt - df.rt.mean()) < std_thres * df.rt.std()
    good_present_time = np.abs(df.actual_stim_dur - df.stim_dur) < stim_dur_thres  # half a frame
    print('Response too slow: {} out of {}'.format(len(df) - fast_rts.sum(), len(df)))
    print('Stimulus presentation too slow: {} out of {}'.format(len(df) - good_present_time.sum(), len(df)))
    df = df[fast_rts & good_present_time]
    return df
def lazy_property(function):
    """Decorator: compute *function* once per instance and cache the result
    on the instance for all later reads.

    Adapted from https://danijar.com/structuring-your-tensorflow-models/
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        # Compute on first access only; later reads hit the cached value.
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
# def hitrate_to_dprime(df, cap=5):
# # df = pandas.DataFrame(hitrate, index=labels, columns=order)
# out = np.zeros_like(df)
# for (i,j), hit_rate in np.ndenumerate(df.values):
# target = df.index[i]
# distr = df.columns[j]
# if target == distr:
# dprime = np.nan
# else:
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[i,j] = dprime
# return out
def hitrate_to_dprime_o1(df, cap=20):
    """Object-level (O1) d-prime per target from a hit-rate table.

    *df* is indexed by target object with one column per object (each cell a
    hit rate for that target/distractor pairing); the result is a Series of
    d-primes clipped to [-cap, cap].
    """
    targets = df.index.unique()
    dprimes = pandas.Series(np.zeros(len(targets)), index=targets)
    ppf = scipy.stats.norm.ppf
    for target in targets:
        hit_rate = np.nanmean(df.loc[df.index == target])
        # False-alarm rate: how often the target was (wrongly) chosen on
        # trials whose true object was something else (the target column of
        # all other rows).
        fa_rate = np.nanmean(1 - df.loc[df.index != target, target])
        dprimes[target] = np.clip(ppf(hit_rate) - ppf(fa_rate), -cap, cap)
    return dprimes
# for distr in distrs:
# # for (i,j), hit_rate in np.ndenumerate(df.values):
# if target == distr:
# dprime = np.nan
# else:
# hit_rate = df.loc[df.index == target].mean()
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[target, distr] = dprime
# return out
def hitrate_to_dprime_i1n(df, cap=20, normalize=True):
    # Image-level (I1n) d-prime: one value per (obj, id) image.
    #
    # NOTE(review): this function looks broken as written; left byte-identical
    # pending confirmation of the intended semantics:
    #  * the loop unpacks ``(target, idd), row`` from df.iterrows(), which
    #    only works if df's index is already an (obj, id) MultiIndex, yet the
    #    body also reads df.obj / row.acc as columns of the same frame;
    #  * ``df.loc[df.obj != target, target]`` selects a column named after the
    #    target object -- unclear whether such columns exist alongside 'acc';
    #  * ``out`` is a Series, so ``out.acc`` in the normalize branch raises
    #    AttributeError (probably meant ``out -= out.groupby('obj')...``,
    #    mirroring hitrate_to_dprime_i2n).
    out = pandas.Series(np.zeros(len(df)),
                        index=df.set_index(['obj', 'id']).index)
    for (target, idd), row in df.iterrows():
        hit_rate = row.acc
        # miss_rate = 1 - np.nanmean(df.loc[df.index == target])
        rej = df.loc[df.obj != target, target]
        fa_rate = 1 - np.nanmean(rej)
        dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
        dprime = np.clip(dprime, -cap, cap)
        out.loc[(target, idd)] = dprime
    if normalize:
        out.acc -= out.groupby('obj').acc.transform(lambda x: x.mean())
    return out
def hitrate_to_dprime_i2n(df, cap=20):
    """Image-by-distractor (I2n) d-prime table.

    Expects *df* with columns ['obj', 'id', 'distr', 'acc'] (one row per
    target-image / distractor-object pairing, 'acc' the hit rate).  Returns
    the frame with 'acc' replaced by a normalized d-prime, clipped to
    [-cap, cap]; target==distractor cells are NaN.
    """
    out = df.set_index(['obj', 'id', 'distr']).copy()
    # NOTE(review): iterrows() yields the remaining columns as a Series, so
    # ``hit_rate`` is a one-element Series here, not a scalar; norm.ppf and
    # the .loc assignment happen to broadcast over it.
    for (target, idd, distr), hit_rate in out.iterrows():
        if target == distr:
            # d-prime is undefined when a target is paired with itself.
            out.loc[(target, idd, distr)] = np.nan
        else:
            # False-alarm rate comes from the mirror condition: trials where
            # the distractor object was the target and this target object
            # served as the distractor.
            rej = df.loc[(df.obj == distr) & (df.distr == target), 'acc']
            fa_rate = 1 - np.nanmean(rej)
            dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
            out.loc[(target, idd, distr)] = dprime
    out = out.reset_index()
    # Normalize each (obj, distr) cell by the mean over its group, then cap.
    # NOTE(review): unlike hitrate_to_dprime_i1n, the cap is applied *after*
    # normalization here.
    out.acc -= out.groupby(['obj', 'distr']).acc.transform(lambda x: x.mean())
    out.acc = np.clip(out.acc, -cap, cap)
    return out
bitcrystal/volatility | volatility/plugins/malware/cmdhistory.py | 50 | 34078 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# Authors:
# Michael Hale Ligh <michael.ligh@mnin.org>
#
# Contributors/References:
# Richard Stevens and Eoghan Casey
# Extracting Windows Cmd Line Details from Physical Memory.
# http://ww.dfrws.org/2010/proceedings/stevens.pdf
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import struct

import volatility.obj as obj
import volatility.plugins.common as common
import volatility.utils as utils
import volatility.win32.tasks as tasks
import volatility.debug as debug
MAX_HISTORY_DEFAULT = 50
#--------------------------------------------------------------------------------
# VTypes
#--------------------------------------------------------------------------------
# Windows 7 Types from conhost.exe
conhost_types_x86 = {
'_COMMAND': [ None, {
'CmdLength': [ 0x00, ['unsigned short']],
'Cmd' : [ 0x02, ['String', dict(encoding = 'utf16', length = lambda x : x.CmdLength)]],
}],
'_COMMAND_HISTORY': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'Flags' : [ 0x08, ['Flags', {'bitmap': {'Allocated': 0, 'Reset': 1}}]],
'Application': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'CommandCount': [ 0x10, ['short']],
'LastAdded': [ 0x12, ['short']],
'LastDisplayed': [ 0x14, ['short']],
'FirstCommand': [ 0x16, ['short']],
'CommandCountMax': [ 0x18, ['short']],
'ProcessHandle': [ 0x1C, ['unsigned int']],
'PopupList': [ 0x20, ['_LIST_ENTRY']],
'CommandBucket': [ 0x28, ['array', lambda x : x.CommandCount, ['pointer', ['_COMMAND']]]],
}],
'_ALIAS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'SourceLength': [ 0x08, ['unsigned short']],
'TargetLength': [ 0x0A, ['unsigned short']],
'Source': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.SourceLength)]]],
'Target': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.TargetLength)]]],
}],
'_EXE_ALIAS_LIST' : [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ExeLength': [ 0x08, ['unsigned short']],
'ExeName': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.ExeLength * 2)]]],
'AliasList': [ 0x10, ['_LIST_ENTRY']],
}],
'_POPUP_LIST' : [ None, {
'ListEntry' : [ 0x00, ['_LIST_ENTRY']],
}],
'_CONSOLE_INFORMATION': [ None, {
'CurrentScreenBuffer': [ 0x98, ['pointer', ['_SCREEN_INFORMATION']]],
'ScreenBuffer': [ 0x9C, ['pointer', ['_SCREEN_INFORMATION']]],
'HistoryList': [ 0xD4, ['_LIST_ENTRY']],
'ProcessList': [ 0x18, ['_LIST_ENTRY']], # GetConsoleProcessList()
'ExeAliasList': [ 0xDC, ['_LIST_ENTRY']], # GetConsoleAliasExes()
'HistoryBufferCount': [ 0xE4, ['unsigned short']], # GetConsoleHistoryInfo()
'HistoryBufferMax': [ 0xE6, ['unsigned short']], # GetConsoleHistoryInfo()
'CommandHistorySize': [ 0xE8, ['unsigned short']],
'OriginalTitle': [ 0xEC, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]], # GetConsoleOriginalTitle()
'Title': [ 0xF0, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]], # GetConsoleTitle()
}],
'_CONSOLE_PROCESS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ProcessHandle': [ 0x8, ['unsigned int']],
}],
'_SCREEN_INFORMATION': [ None, {
'ScreenX': [ 0x08, ['short']],
'ScreenY': [ 0x0A, ['short']],
'Rows': [ 0x3C, ['pointer', ['array', lambda x : x.ScreenY, ['_ROW']]]],
'Next': [ 0xDC, ['pointer', ['_SCREEN_INFORMATION']]],
}],
'_ROW': [ 0x1C, {
'Chars': [ 0x08, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
}
# Windows 7 Types from conhost.exe
conhost_types_x64 = {
'_COMMAND': [ None, {
'CmdLength': [ 0x00, ['unsigned short']],
'Cmd' : [ 0x02, ['String', dict(encoding = 'utf16', length = lambda x : x.CmdLength)]],
}],
'_COMMAND_HISTORY': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'Flags' : [ 0x10, ['Flags', {'bitmap': {'Allocated': 0, 'Reset': 1}}]], # AllocateCommandHistory()
'Application': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]], # AllocateCommandHistory()
'CommandCount': [ 0x20, ['short']],
'LastAdded': [ 0x22, ['short']],
'LastDisplayed': [ 0x24, ['short']],
'FirstCommand': [ 0x26, ['short']],
'CommandCountMax': [ 0x28, ['short']], # AllocateCommandHistory()
'ProcessHandle': [ 0x30, ['address']], # AllocateCommandHistory()
'PopupList': [ 0x38, ['_LIST_ENTRY']], # AllocateCommandHistory()
'CommandBucket': [ 0x48, ['array', lambda x : x.CommandCount, ['pointer', ['_COMMAND']]]],
}],
'_ALIAS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'SourceLength': [ 0x10, ['unsigned short']], # AddAlias()
'TargetLength': [ 0x12, ['unsigned short']], # AddAlias()
'Source': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.SourceLength)]]], # AddAlias()
'Target': [ 0x20, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.TargetLength)]]], # AddAlias()
}],
'_EXE_ALIAS_LIST' : [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ExeLength': [ 0x10, ['unsigned short']], # AddExeAliasList()
'ExeName': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.ExeLength * 2)]]], # AddExeAliasList()
'AliasList': [ 0x20, ['_LIST_ENTRY']], # AddExeAliasList()
}],
'_POPUP_LIST' : [ None, {
'ListEntry' : [ 0x00, ['_LIST_ENTRY']],
}],
'_CONSOLE_INFORMATION': [ None, {
'ProcessList': [ 0x28, ['_LIST_ENTRY']], # SrvGetConsoleProcessList()
'CurrentScreenBuffer': [ 0xE0, ['pointer', ['_SCREEN_INFORMATION']]], # AllocateConsole()
'ScreenBuffer': [ 0xE8, ['pointer', ['_SCREEN_INFORMATION']]], # AllocateConsole()
'HistoryList': [ 0x148, ['_LIST_ENTRY']], # AllocateCommandHistory()
'ExeAliasList': [ 0x158, ['_LIST_ENTRY']], # SrvGetConsoleAliasExes()
'HistoryBufferCount': [ 0x168, ['unsigned short']], # AllocateConsole()
'HistoryBufferMax': [ 0x16A, ['unsigned short']], # AllocateConsole()
'CommandHistorySize': [ 0x16C, ['unsigned short']], # AllocateConsole()
'OriginalTitle': [ 0x170, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]], # SrvGetConsoleTitle()
'Title': [ 0x178, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]], # SrvGetConsoleTitle()
}],
'_CONSOLE_PROCESS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ProcessHandle': [ 0x10, ['unsigned int']], # FindProcessInList()
}],
'_SCREEN_INFORMATION': [ None, {
'ScreenX': [ 8, ['short']],
'ScreenY': [ 10, ['short']],
'Rows': [ 0x48, ['pointer', ['array', lambda x : x.ScreenY, ['_ROW']]]],
'Next': [ 0x128, ['pointer', ['_SCREEN_INFORMATION']]],
}],
'_ROW': [ 0x28, {
'Chars': [ 0x08, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
}
# Windows XP, 2003, 2008, Vista from winsrv.dll
winsrv_types_x86 = {
'_COMMAND': [ None, {
'CmdLength': [ 0x00, ['unsigned short']],
'Cmd' : [ 0x02, ['String', dict(encoding = 'utf16', length = lambda x : x.CmdLength)]],
}],
'_COMMAND_HISTORY': [ None, {
'Flags' : [ 0x00, ['Flags', {'bitmap': {'Allocated': 0, 'Reset': 1}}]],
'ListEntry': [ 0x04, ['_LIST_ENTRY']],
'Application': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'CommandCount': [ 0x10, ['short']],
'LastAdded': [ 0x12, ['short']],
'LastDisplayed': [ 0x14, ['short']],
'FirstCommand': [ 0x16, ['short']],
'CommandCountMax': [ 0x18, ['short']],
'ProcessHandle': [ 0x1C, ['unsigned int']],
'PopupList': [ 0x20, ['_LIST_ENTRY']],
'CommandBucket': [ 0x28, ['array', lambda x : x.CommandCount, ['pointer', ['_COMMAND']]]],
}],
'_ALIAS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'SourceLength': [ 0x08, ['unsigned short']],
'TargetLength': [ 0x0A, ['unsigned short']],
'Source': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.SourceLength)]]],
'Target': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.TargetLength)]]],
}],
'_EXE_ALIAS_LIST' : [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ExeLength': [ 0x08, ['unsigned short']],
'ExeName': [ 0x0C, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.ExeLength * 2)]]],
'AliasList': [ 0x10, ['_LIST_ENTRY']],
}],
'_POPUP_LIST' : [ None, {
'ListEntry' : [ 0x00, ['_LIST_ENTRY']],
}],
'_CONSOLE_INFORMATION': [ None, {
'CurrentScreenBuffer': [ 0xB0, ['pointer', ['_SCREEN_INFORMATION']]],
'ScreenBuffer': [ 0xB4, ['pointer', ['_SCREEN_INFORMATION']]],
'HistoryList': [ 0x108, ['_LIST_ENTRY']],
'ProcessList': [ 0x100, ['_LIST_ENTRY']],
'ExeAliasList': [ 0x110, ['_LIST_ENTRY']],
'HistoryBufferCount': [ 0x118, ['unsigned short']],
'HistoryBufferMax': [ 0x11A, ['unsigned short']],
'CommandHistorySize': [ 0x11C, ['unsigned short']],
'OriginalTitle': [ 0x124, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'Title': [ 0x128, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
'_CONSOLE_PROCESS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ProcessHandle': [ 0x08, ['unsigned int']],
'Process': [ 0x0C, ['pointer', ['_CSR_PROCESS']]],
}],
'_SCREEN_INFORMATION': [ None, {
'Console': [ 0x00, ['pointer', ['_CONSOLE_INFORMATION']]],
'ScreenX': [ 0x24, ['short']],
'ScreenY': [ 0x26, ['short']],
'Rows': [ 0x58, ['pointer', ['array', lambda x : x.ScreenY, ['_ROW']]]],
'Next': [ 0xF8, ['pointer', ['_SCREEN_INFORMATION']]],
}],
'_ROW': [ 0x1C, {
'Chars': [ 0x08, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
'_CSR_PROCESS' : [ 0x60, { # this is a public PDB
'ClientId' : [ 0x0, ['_CLIENT_ID']],
'ListLink' : [ 0x8, ['_LIST_ENTRY']],
'ThreadList' : [ 0x10, ['_LIST_ENTRY']],
'NtSession' : [ 0x18, ['pointer', ['_CSR_NT_SESSION']]],
'ClientPort' : [ 0x1c, ['pointer', ['void']]],
'ClientViewBase' : [ 0x20, ['pointer', ['unsigned char']]],
'ClientViewBounds' : [ 0x24, ['pointer', ['unsigned char']]],
'ProcessHandle' : [ 0x28, ['pointer', ['void']]],
'SequenceNumber' : [ 0x2c, ['unsigned long']],
'Flags' : [ 0x30, ['unsigned long']],
'DebugFlags' : [ 0x34, ['unsigned long']],
'ReferenceCount' : [ 0x38, ['unsigned long']],
'ProcessGroupId' : [ 0x3c, ['unsigned long']],
'ProcessGroupSequence' : [ 0x40, ['unsigned long']],
'LastMessageSequence' : [ 0x44, ['unsigned long']],
'NumOutstandingMessages' : [ 0x48, ['unsigned long']],
'ShutdownLevel' : [ 0x4c, ['unsigned long']],
'ShutdownFlags' : [ 0x50, ['unsigned long']],
'Luid' : [ 0x54, ['_LUID']],
'ServerDllPerProcessData' : [ 0x5c, ['array', 1, ['pointer', ['void']]]],
}],
}
winsrv_types_x64 = {
'_COMMAND': [ None, {
'CmdLength': [ 0x00, ['unsigned short']],
'Cmd' : [ 0x02, ['String', dict(encoding = 'utf16', length = lambda x : x.CmdLength)]],
}],
'_COMMAND_HISTORY': [ None, {
'Flags' : [ 0x00, ['Flags', {'bitmap': {'Allocated': 0, 'Reset': 1}}]],
'ListEntry': [ 0x08, ['_LIST_ENTRY']],
'Application': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'CommandCount': [ 0x20, ['short']],
'LastAdded': [ 0x22, ['short']],
'LastDisplayed': [ 0x24, ['short']],
'FirstCommand': [ 0x26, ['short']],
'CommandCountMax': [ 0x28, ['short']],
'ProcessHandle': [ 0x30, ['unsigned int']],
'PopupList': [ 0x38, ['_LIST_ENTRY']],
'CommandBucket': [ 0x48, ['array', lambda x : x.CommandCount, ['pointer', ['_COMMAND']]]],
}],
'_ALIAS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'SourceLength': [ 0x10, ['unsigned short']],
'TargetLength': [ 0x12, ['unsigned short']],
'Source': [ 0x14, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.SourceLength)]]],
'Target': [ 0x1C, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.TargetLength)]]],
}],
'_EXE_ALIAS_LIST' : [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ExeLength': [ 0x10, ['unsigned short']],
'ExeName': [ 0x12, ['pointer', ['String', dict(encoding = 'utf16', length = lambda x : x.ExeLength * 2)]]],
'AliasList': [ 0x1A, ['_LIST_ENTRY']],
}],
'_POPUP_LIST' : [ None, {
'ListEntry' : [ 0x00, ['_LIST_ENTRY']],
}],
'_CONSOLE_INFORMATION': [ None, {
'CurrentScreenBuffer': [ 0xE8, ['pointer', ['_SCREEN_INFORMATION']]],
'ScreenBuffer': [ 0xF0, ['pointer', ['_SCREEN_INFORMATION']]],
'HistoryList': [ 0x188, ['_LIST_ENTRY']],
'ProcessList': [ 0x178, ['_LIST_ENTRY']],
'ExeAliasList': [ 0x198, ['_LIST_ENTRY']],
'HistoryBufferCount': [ 0x1A8, ['unsigned short']],
'HistoryBufferMax': [ 0x1AA, ['unsigned short']],
'CommandHistorySize': [ 0x1AC, ['unsigned short']],
'OriginalTitle': [ 0x1B0, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'Title': [ 0x1B8, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
'_CONSOLE_PROCESS': [ None, {
'ListEntry': [ 0x00, ['_LIST_ENTRY']],
'ProcessHandle': [ 0x10, ['unsigned int']],
'Process': [ 0x18, ['pointer', ['_CSR_PROCESS']]],
}],
'_SCREEN_INFORMATION': [ None, {
'Console': [ 0x00, ['pointer', ['_CONSOLE_INFORMATION']]],
'ScreenX': [ 0x28, ['short']],
'ScreenY': [ 0x2A, ['short']],
'Rows': [ 0x68, ['pointer', ['array', lambda x : x.ScreenY, ['_ROW']]]],
'Next': [ 0x128, ['pointer', ['_SCREEN_INFORMATION']]],
}],
'_ROW': [ 0x28, {
'Chars': [ 0x08, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
}],
'_CSR_PROCESS' : [ 0x60, { # this is a public PDB
'ClientId' : [ 0x0, ['_CLIENT_ID']],
'ListLink' : [ 0x8, ['_LIST_ENTRY']],
'ThreadList' : [ 0x10, ['_LIST_ENTRY']],
'NtSession' : [ 0x18, ['pointer', ['_CSR_NT_SESSION']]],
'ClientPort' : [ 0x1c, ['pointer', ['void']]],
'ClientViewBase' : [ 0x20, ['pointer', ['unsigned char']]],
'ClientViewBounds' : [ 0x24, ['pointer', ['unsigned char']]],
'ProcessHandle' : [ 0x28, ['pointer', ['void']]],
'SequenceNumber' : [ 0x2c, ['unsigned long']],
'Flags' : [ 0x30, ['unsigned long']],
'DebugFlags' : [ 0x34, ['unsigned long']],
'ReferenceCount' : [ 0x38, ['unsigned long']],
'ProcessGroupId' : [ 0x3c, ['unsigned long']],
'ProcessGroupSequence' : [ 0x40, ['unsigned long']],
'LastMessageSequence' : [ 0x44, ['unsigned long']],
'NumOutstandingMessages' : [ 0x48, ['unsigned long']],
'ShutdownLevel' : [ 0x4c, ['unsigned long']],
'ShutdownFlags' : [ 0x50, ['unsigned long']],
'Luid' : [ 0x54, ['_LUID']],
'ServerDllPerProcessData' : [ 0x5c, ['array', 1, ['pointer', ['void']]]],
}],
}
#--------------------------------------------------------------------------------
# Object Classes
#--------------------------------------------------------------------------------
class _CONSOLE_INFORMATION(obj.CType):
    """ object class for console information structs """

    def get_histories(self):
        """Generator for the command history buffers linked to this
        console (one _COMMAND_HISTORY node per entry on HistoryList)."""
        for hist in self.HistoryList.list_of_type("_COMMAND_HISTORY", "ListEntry"):
            yield hist

    def get_exe_aliases(self):
        """Generator for exe aliases.

        There is one _EXE_ALIAS_LIST for each executable
        (i.e. C:\windows\system32\cmd.exe) with registered
        aliases. The _EXE_ALIAS_LIST.AliasList contains
        one _ALIAS structure for each specific mapping.

        See GetConsoleAliasExes, GetConsoleAliases, and
        AddConsoleAlias.
        """
        for exe_alias in self.ExeAliasList.list_of_type("_EXE_ALIAS_LIST", "ListEntry"):
            yield exe_alias

    def get_processes(self):
        """Generator for processes attached to the console.

        Multiple processes can be attached to the same
        console (usually as a result of inheritance from a
        parent process or by duplicating another process's
        console handle). Internally, they are tracked as
        _CONSOLE_PROCESS structures in this linked list.

        See GetConsoleProcessList and AttachConsole.
        """
        for h in self.ProcessList.list_of_type("_CONSOLE_PROCESS", "ListEntry"):
            yield h

    def get_screens(self):
        """Generator for screens in the console.

        A console can have multiple screen buffers at a time,
        but only the current/active one is displayed.

        Multiple screens are tracked using the singly-linked
        list _SCREEN_INFORMATION.Next.

        See CreateConsoleScreenBuffer
        """
        # Seed with the active buffer; ScreenBuffer may alias the same
        # structure, so only add it when distinct.
        screens = [self.CurrentScreenBuffer]
        if self.ScreenBuffer not in screens:
            screens.append(self.ScreenBuffer)
        # Walk each buffer's singly-linked Next chain until a NULL
        # pointer terminates the list.
        for screen in screens:
            cur = screen
            while cur and cur.v() != 0:
                yield cur
                cur = cur.Next.dereference()
class _CONSOLE_PROCESS(obj.CType):
    """ object class for console process """

    def reference_object_by_handle(self):
        """Resolve this entry's ProcessHandle into an _EPROCESS by
        walking the handle table of the owning (csrss/conhost) process.
        This mirrors the kernel API ObReferenceObjectByHandle.

        Returns a NoneObject when no matching handle exists.
        """
        # obj_parent is the _CONSOLE_INFORMATION; its parent is the task
        # that owns the console (set when the console object was built).
        owner_task = self.obj_parent.obj_parent
        match = next((handle for handle in owner_task.ObjectTable.handles()
                      if handle.HandleValue == self.ProcessHandle), None)
        if match is not None:
            return match.dereference_as("_EPROCESS")
        return obj.NoneObject("Could not find process in handle table")
class _SCREEN_INFORMATION(obj.CType):
    """ object class for screen information """

    def get_buffer(self, truncate = True):
        """Get the screen buffer.

        The screen buffer is comprised of the screen's Y
        coordinate which tells us the number of rows and
        the X coordinate which tells us the width of each
        row in characters. These together provide all of
        the input and output that users see when the
        console is displayed.

        @param truncate: True if the empty rows at the
        end (i.e. bottom) of the screen buffer should be
        suppressed.
        """
        rows = []
        for row in self.Rows.dereference():
            if row.Chars.is_valid():
                rows.append(str(row.Chars.dereference())[0:self.ScreenX])
        if truncate:
            rows = self._truncate_empty_tail(rows)
        return rows

    def _truncate_empty_tail(self, rows):
        """Return rows with the trailing all-space rows removed.

        An "empty" row isn't "" as one might assume - it is actually
        ScreenX space characters. It also seems that when the buffer
        width is greater than 128 characters, it is truncated to 128 in
        memory, hence the min() below.

        Fix over the previous implementation: a 0 sentinel conflated
        "every row is empty" with "the last row is non-empty", which
        silently discarded the whole buffer in the latter case. A -1
        sentinel distinguishes the two.
        """
        blank_width = min(self.ScreenX, 128)
        # Walk backwards to find the index of the last non-empty row;
        # -1 means no row has any content.
        last_non_empty = -1
        for index in range(len(rows) - 1, -1, -1):
            if rows[index].count(" ") != blank_width:
                last_non_empty = index
                break
        return rows[0:last_non_empty + 1]
class _EXE_ALIAS_LIST(obj.CType):
    """ object class for alias lists """

    def get_aliases(self):
        """Generator for the individual aliases for a
        particular executable (one _ALIAS per source->target
        mapping, linked through AliasList)."""
        for alias in self.AliasList.list_of_type("_ALIAS", "ListEntry"):
            yield alias
class _COMMAND_HISTORY(obj.CType):
    """ object class for command histories """

    def is_valid(self, max_history = MAX_HISTORY_DEFAULT): #pylint: disable-msg=W0221
        """Override BaseObject.is_valid with sanity checks specific
        to _COMMAND_HISTORY objects.

        @param max_history: upper bound for the command counters
        (the configured CommandCountMax on the analyzed system).
        """
        if not obj.CType.is_valid(self):
            return False
        # Every counter must sit in its plausible range. CommandCount
        # and FirstCommand are zero-based; LastAdded and LastDisplayed
        # may legitimately be -1 when the history is empty.
        counters_sane = (0 <= self.CommandCount <= max_history
                         and -1 <= self.LastAdded <= max_history
                         and -1 <= self.LastDisplayed <= max_history
                         and 0 <= self.FirstCommand <= max_history)
        if not counters_sane:
            return False
        # FirstCommand is either 0 or immediately follows LastAdded
        if not (self.FirstCommand == 0 or self.FirstCommand == self.LastAdded + 1):
            return False
        # Process handle must look like a valid pid
        if not (0 < self.ProcessHandle <= 0xFFFF):
            return False
        # Finally, the popup list entry must still be intact: the node
        # pointed to by Flink must link back to this PopupList.
        popup = obj.Object("_POPUP_LIST", offset = self.PopupList.Flink,
                           vm = self.obj_vm)
        if popup.ListEntry.Blink != self.PopupList.obj_offset:
            return False
        return True

    def get_commands(self):
        """Yield (slot_index, _COMMAND) for each allocated slot in the
        CommandBucket pointer array.

        The array size is CommandCount. Once CommandCount is reached,
        the oldest commands are cycled out and the rest are coalesced.
        """
        for slot, command_ptr in enumerate(self.CommandBucket):
            if command_ptr:
                yield slot, command_ptr.dereference()
#--------------------------------------------------------------------------------
# Profile Modifications
#--------------------------------------------------------------------------------
class CmdHistoryVTypesx86(obj.ProfileModification):
    """This modification applies the vtypes for 32bit
    Windows up to Windows 7."""
    before = ['WindowsObjectClasses']

    def check(self, profile):
        # An imperative check() (rather than a declarative conditions
        # dict) is required because "older than 6.1" is a compound test:
        # any major < 6, or major == 6 with minor 0 (Vista/2008).
        # NOTE(review): m.get('major') defaults to None, which compares
        # as < 6 under Python 2 -- confirm that profiles always set it.
        m = profile.metadata
        return (m.get('os', None) == 'windows' and
                m.get('memory_model', '32bit') == '32bit' and
                (m.get('major') < 6 or (m.get('major') == 6 and m.get('minor') < 1)))

    def modification(self, profile):
        # Pre-Win7 consoles live in csrss.exe (winsrv.dll structures)
        profile.vtypes.update(winsrv_types_x86)
class CmdHistoryVTypesx64(obj.ProfileModification):
    """This modification applies the vtypes for 64bit
    Windows up to Windows 7."""
    before = ['WindowsObjectClasses']

    def check(self, profile):
        # Same compound "older than 6.1" test as the 32-bit variant,
        # keyed on the 64-bit memory model.
        m = profile.metadata
        return (m.get('os', None) == 'windows' and
                m.get('memory_model', '32bit') == '64bit' and
                (m.get('major') < 6 or (m.get('major') == 6 and m.get('minor') < 1)))

    def modification(self, profile):
        # Pre-Win7 consoles live in csrss.exe (winsrv.dll structures)
        profile.vtypes.update(winsrv_types_x64)
class CmdHistoryVTypesWin7x86(obj.ProfileModification):
    """This modification applies the vtypes for 32bit
    Windows starting with Windows 7."""
    before = ['WindowsObjectClasses']
    # Declarative form works here: Windows >= 6.1 (Win7 / 2008 R2+)
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x >= 1,
                  'memory_model': lambda x : x == '32bit'}

    def modification(self, profile):
        # Win7+ consoles live in conhost.exe
        profile.vtypes.update(conhost_types_x86)
class CmdHistoryVTypesWin7x64(obj.ProfileModification):
    """This modification applies the vtypes for 64bit
    Windows starting with Windows 7."""
    before = ['WindowsObjectClasses']
    # Declarative form works here: Windows >= 6.1 (Win7 / 2008 R2+)
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x >= 1,
                  'memory_model': lambda x : x == '64bit'}

    def modification(self, profile):
        # Win7+ consoles live in conhost.exe
        profile.vtypes.update(conhost_types_x64)
class CmdHistoryObjectClasses(obj.ProfileModification):
    """This modification applies the object classes for all
    versions of 32bit Windows."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows'}
    # 'memory_model': lambda x : x == '32bit'}

    def modification(self, profile):
        # Attach the behaviour (generators, validation) to the raw
        # vtypes installed by the version-specific modifications above.
        profile.object_classes.update({
            '_CONSOLE_INFORMATION': _CONSOLE_INFORMATION,
            '_SCREEN_INFORMATION': _SCREEN_INFORMATION,
            '_EXE_ALIAS_LIST': _EXE_ALIAS_LIST,
            '_COMMAND_HISTORY': _COMMAND_HISTORY,
            '_CONSOLE_PROCESS': _CONSOLE_PROCESS,
        })
#--------------------------------------------------------------------------------
# CmdScan Plugin
#--------------------------------------------------------------------------------
class CmdScan(common.AbstractWindowsCommand):
    """Extract command history by scanning for _COMMAND_HISTORY"""

    def __init__(self, config, *args, **kwargs):
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
        # The default comes from HKCU\Console\HistoryBufferSize
        config.add_option('MAX_HISTORY', short_option = 'M', default = MAX_HISTORY_DEFAULT,
                          action = 'store', type = 'int',
                          help = 'CommandCountMax (default = 50)')

    def cmdhistory_process_filter(self, addr_space):
        """Generator for processes that might contain command
        history information.

        Takes into account if we're on Windows 7 or an earlier
        operating system.

        @param addr_space: a kernel address space.
        """
        # Detect if we're on windows seven: console handling moved from
        # csrss.exe into conhost.exe starting with version 6.1
        use_conhost = (6, 1) <= (addr_space.profile.metadata.get('major', 0),
                                 addr_space.profile.metadata.get('minor', 0))
        for task in tasks.pslist(addr_space):
            process_name = str(task.ImageFileName).lower()
            # The process we select is conhost on Win7 or csrss for others
            if ((use_conhost and process_name == "conhost.exe") or
                    (not use_conhost and process_name == "csrss.exe")):
                yield task

    def calculate(self):
        """Scan candidate processes for _COMMAND_HISTORY structures.

        The default pattern we search for, as described by Stevens and Casey,
        is "\\x32\\x00". That's because CommandCountMax is a little-endian
        unsigned short whose default value is 50. However, that value can be
        changed by right clicking cmd.exe and going to Properties->Options->Cmd History
        or by calling the API function kernel32!SetConsoleHistoryInfo. Thus
        you can tweak the search criteria by using the --MAX_HISTORY.

        Yields (task, hist) pairs for candidates that pass the
        _COMMAND_HISTORY.is_valid() sanity checks.
        """
        addr_space = utils.load_as(self._config)
        max_history = self._config.MAX_HISTORY
        # Pack as a little-endian unsigned short: identical bytes to the
        # old chr(MAX_HISTORY) + "\x00" for values < 256, but also works
        # for larger CommandCountMax values (chr() raised ValueError).
        srch_pattern = struct.pack("<H", max_history)
        # Distance from the matched CommandCountMax field back to the
        # start of the structure; loop-invariant, so compute it once.
        count_max_offset = addr_space.profile.get_obj_offset(
            "_COMMAND_HISTORY", "CommandCountMax")
        for task in self.cmdhistory_process_filter(addr_space):
            process_space = task.get_process_address_space()
            # Skip processes whose address space cannot be reconstructed
            if not process_space:
                continue
            for found in task.search_process_memory([srch_pattern]):
                hist = obj.Object("_COMMAND_HISTORY",
                                  vm = process_space,
                                  offset = found - count_max_offset)
                if hist.is_valid(max_history = max_history):
                    yield task, hist

    def render_text(self, outfd, data):
        """Render each discovered history and its command slots on outfd."""
        for task, hist in data:
            outfd.write("*" * 50 + "\n")
            outfd.write("CommandProcess: {0} Pid: {1}\n".format(
                task.ImageFileName, task.UniqueProcessId))
            outfd.write("CommandHistory: {0:#x} Application: {1} Flags: {2}\n".format(
                hist.obj_offset, hist.Application.dereference(),
                hist.Flags))
            outfd.write("CommandCount: {0} LastAdded: {1} LastDisplayed: {2}\n".format(
                hist.CommandCount, hist.LastAdded, hist.LastDisplayed))
            outfd.write("FirstCommand: {0} CommandCountMax: {1}\n".format(
                hist.FirstCommand, hist.CommandCountMax))
            outfd.write("ProcessHandle: {0:#x}\n".format(hist.ProcessHandle))
            # If the _COMMAND_HISTORY is in use, we would only take
            # hist.CommandCount but since we're brute forcing, try the
            # maximum and hope that some slots were not overwritten
            # or zero-ed out.
            pointers = obj.Object("Array", targetType = "address",
                                  count = hist.CommandCountMax,
                                  offset = hist.obj_offset +
                                  hist.obj_vm.profile.get_obj_offset("_COMMAND_HISTORY", "CommandBucket"),
                                  vm = hist.obj_vm)
            for i, p in enumerate(pointers):
                cmd = p.dereference_as("_COMMAND")
                if cmd and str(cmd.Cmd):
                    outfd.write("Cmd #{0} @ {1:#x}: {2}\n".format(
                        i, cmd.obj_offset, str(cmd.Cmd)))
#--------------------------------------------------------------------------------
# Consoles Plugin
#--------------------------------------------------------------------------------
class Consoles(CmdScan):
    """Extract command history by scanning for _CONSOLE_INFORMATION"""

    def __init__(self, config, *args, **kwargs):
        CmdScan.__init__(self, config, *args, **kwargs)
        # The default comes from HKCU\Console\NumberOfHistoryBuffers
        config.add_option('HISTORY_BUFFERS', short_option = 'B', default = 4,
                          action = 'store', type = 'int',
                          help = 'HistoryBufferMax (default = 4)')

    def calculate(self):
        """Scan candidate processes for _CONSOLE_INFORMATION structures.

        Candidates are found via the CommandHistorySize field (the same
        little-endian unsigned short pattern used by CmdScan) and then
        validated against the expected HistoryBufferMax/Count and the
        CommandCountMax of the first linked _COMMAND_HISTORY.

        Yields (task, console) pairs.
        """
        addr_space = utils.load_as(self._config)
        # Little-endian unsigned short; also supports MAX_HISTORY > 255,
        # where chr() would raise ValueError.
        srch_pattern = struct.pack("<H", self._config.MAX_HISTORY)
        # Loop-invariant structure offsets, computed once.
        size_offset = addr_space.profile.get_obj_offset(
            "_CONSOLE_INFORMATION", "CommandHistorySize")
        list_offset = addr_space.profile.get_obj_offset(
            "_COMMAND_HISTORY", "ListEntry")
        for task in self.cmdhistory_process_filter(addr_space):
            process_space = task.get_process_address_space()
            # Skip processes whose address space cannot be reconstructed
            if not process_space:
                continue
            for found in task.search_process_memory([srch_pattern]):
                console = obj.Object("_CONSOLE_INFORMATION",
                                     offset = found - size_offset,
                                     vm = process_space,
                                     parent = task)
                if (console.HistoryBufferMax != self._config.HISTORY_BUFFERS or
                        console.HistoryBufferCount > self._config.HISTORY_BUFFERS):
                    continue
                # Check the first command history as the final constraint
                history = obj.Object("_COMMAND_HISTORY",
                                     offset = console.HistoryList.Flink.dereference().obj_offset -
                                     list_offset,
                                     vm = process_space)
                if history.CommandCountMax != self._config.MAX_HISTORY:
                    continue
                yield task, console

    def render_text(self, outfd, data):
        """Render console details, attached processes, histories,
        aliases, and screen buffer dumps on outfd."""
        for task, console in data:
            outfd.write("*" * 50 + "\n")
            outfd.write("ConsoleProcess: {0} Pid: {1}\n".format(
                task.ImageFileName, task.UniqueProcessId))
            outfd.write("Console: {0:#x} CommandHistorySize: {1}\n".format(
                console.obj_offset, console.CommandHistorySize))
            outfd.write("HistoryBufferCount: {0} HistoryBufferMax: {1}\n".format(
                console.HistoryBufferCount, console.HistoryBufferMax))
            outfd.write("OriginalTitle: {0}\n".format(console.OriginalTitle.dereference()))
            outfd.write("Title: {0}\n".format(console.Title.dereference()))
            for console_proc in console.get_processes():
                process = console_proc.reference_object_by_handle()
                if process:
                    outfd.write("AttachedProcess: {0} Pid: {1} Handle: {2:#x}\n".format(
                        process.ImageFileName, process.UniqueProcessId,
                        console_proc.ProcessHandle))
            for hist in console.get_histories():
                outfd.write("----\n")
                outfd.write("CommandHistory: {0:#x} Application: {1} Flags: {2}\n".format(
                    hist.obj_offset, hist.Application.dereference(),
                    hist.Flags))
                outfd.write("CommandCount: {0} LastAdded: {1} LastDisplayed: {2}\n".format(
                    hist.CommandCount, hist.LastAdded, hist.LastDisplayed))
                outfd.write("FirstCommand: {0} CommandCountMax: {1}\n".format(
                    hist.FirstCommand, hist.CommandCountMax))
                outfd.write("ProcessHandle: {0:#x}\n".format(hist.ProcessHandle))
                for i, cmd in hist.get_commands():
                    if cmd.Cmd:
                        outfd.write("Cmd #{0} at {1:#x}: {2}\n".format(
                            i, cmd.obj_offset, str(cmd.Cmd)))
            for exe_alias in console.get_exe_aliases():
                for alias in exe_alias.get_aliases():
                    outfd.write("----\n")
                    outfd.write("Alias: {0} Source: {1} Target: {2}\n".format(
                        exe_alias.ExeName.dereference(), alias.Source.dereference(),
                        alias.Target.dereference()))
            for screen in console.get_screens():
                outfd.write("----\n")
                outfd.write("Screen {0:#x} X:{1} Y:{2}\n".format(
                    screen.dereference(), screen.ScreenX, screen.ScreenY))
                outfd.write("Dump:\n{0}\n".format('\n'.join(screen.get_buffer())))
| gpl-2.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/series_factory.py | 13 | 1413 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from .data_source import NumDataSource, NumRef, AxDataSource
from .reference import Reference
from .series import Series, XYSeries, SeriesLabel, StrRef
from ..utils import SHEETRANGE_RE, rows_from_range, quote_sheetname
def SeriesFactory(values, xvalues=None, zvalues=None, title=None, title_from_data=False):
    """
    Convenience Factory for creating chart data series.

    :param values: the y-values (or the only values) as a Reference or a
        range string such as "Sheet1!$A$1:$A$10"
    :param xvalues: optional x-values; when given an XYSeries
        (scatter/bubble) is built instead of a plain Series
    :param zvalues: optional bubble sizes; only consulted when xvalues
        is also given
    :param title: optional literal series title
    :param title_from_data: when True, the last cell of *values* is
        split off and referenced as the series title (this takes
        precedence over *title*)
    """
    if not isinstance(values, Reference):
        values = Reference(range_string=values)

    if title_from_data:
        # Remove the final cell from the range and point the title at it.
        # NOTE(review): assumes Reference.pop() returns the last cell of
        # the range -- confirm against the Reference implementation.
        cell = values.pop()
        title = "{0}!{1}".format(values.sheetname, cell)
        title = SeriesLabel(strRef=StrRef(title))
    elif title is not None:
        title = SeriesLabel(v=title)

    source = NumDataSource(numRef=NumRef(f=values))
    if xvalues is not None:
        if not isinstance(xvalues, Reference):
            xvalues = Reference(range_string=xvalues)
        series = XYSeries()
        series.yVal = source
        series.xVal = AxDataSource(numRef=NumRef(f=xvalues))
        if zvalues is not None:
            if not isinstance(zvalues, Reference):
                zvalues = Reference(range_string=zvalues)
            series.zVal = NumDataSource(NumRef(f=zvalues))
    else:
        series = Series()
        series.val = source

    if title is not None:
        series.title = title
    return series
| apache-2.0 |
AsgerPetersen/QGIS | python/core/__init__.py | 1 | 5343 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2014
Copyright : (C) 2014 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'May 2014'
__copyright__ = '(C) 2014, Nathan Woodrow'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QCoreApplication, NULL
import inspect
import string
from qgis._core import *
def register_function(function, arg_count, group, usesgeometry=False, **kwargs):
    """
    Register a Python function to be used as a expression function.

    Functions should take (values, feature, parent) as args:

    Example:
        def myfunc(values, feature, parent):
            pass

    They can also shortcut naming feature and parent args by using *args
    if they are not needed in the function.

    Example:
        def myfunc(values, *args):
            pass

    Functions should return a value compatible with QVariant

    Eval errors can be raised using parent.setEvalErrorString("Error message")

    Recognised keyword arguments:
        name -- the expression function name (defaults to function.__name__)
        register -- when False the wrapper is built but not registered

    :param function: the Python callable to expose
    :param arg_count: number of expression arguments, or "auto" to derive
        it from the function signature (minus feature and parent)
    :param group: expression builder group to list the function under
    :param usesgeometry: whether the function needs the feature geometry
    :return: the QgsExpressionFunction wrapper, or None when a previously
        registered function of the same name could not be unregistered
    """
    class QgsExpressionFunction(QgsExpression.Function):
        """Adapter exposing a plain Python callable as a QgsExpression.Function."""

        def __init__(self, func, name, args, group, helptext='', usesgeometry=True, expandargs=False):
            QgsExpression.Function.__init__(self, name, args, group, helptext, usesgeometry)
            self.function = func
            self.expandargs = expandargs

        def func(self, values, feature, parent):
            # Convert any Python exception into an expression eval error
            # instead of letting it escape into the C++ caller.
            try:
                if self.expandargs:
                    # The wrapped function declared individual parameters:
                    # append feature and parent as trailing positionals.
                    values.append(feature)
                    values.append(parent)
                    return self.function(*values)
                else:
                    return self.function(values, feature, parent)
            except Exception as ex:
                parent.setEvalErrorString(str(ex))
                return None

    helptemplate = string.Template("""<h3>$name function</h3><br>$doc""")
    name = kwargs.get('name', function.__name__)
    helptext = function.__doc__ or ''
    helptext = helptext.strip()
    expandargs = False

    if arg_count == "auto":
        # Work out the number of args we need.
        # Number of function args - 2. The last two args are always feature, parent.
        args = inspect.getargspec(function).args
        number = len(args)
        arg_count = number - 2
        expandargs = True

    register = kwargs.get('register', True)
    if register and QgsExpression.isFunctionName(name):
        # Re-registration: drop the existing function first; warn and
        # bail out if it cannot be removed.
        if not QgsExpression.unregisterFunction(name):
            msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
            msg = QCoreApplication.translate("UserExpressions", "The user expression {0} already exists and could not be unregistered.").format(name)
            QgsMessageLog.logMessage(msg + "\n", msgtitle, QgsMessageLog.WARNING)
            return None

    function.__name__ = name
    helptext = helptemplate.safe_substitute(name=name, doc=helptext)
    f = QgsExpressionFunction(function, name, arg_count, group, helptext, usesgeometry, expandargs)

    # This doesn't really make any sense here but does when used from a decorator context
    # so it can stay.
    if register:
        QgsExpression.registerFunction(f)
    return f
def qgsfunction(args='auto', group='custom', **kwargs):
    """
    Decorator used to define and register a user expression function
    with QgsExpression.

    Example:
        @qgsfunction(2, 'test')
        def add(values, feature, parent):
            pass

    Will create and register a function in QgsExpression called 'add' in the
    'test' group that takes two arguments.

    Or, not using feature and parent:

        @qgsfunction(2, 'test')
        def add(values, *args):
            pass

    :param args: number of parameters the function takes, or 'auto' to
        derive it from the wrapped function's signature.
    :param group: the expression group to list the function under.
    :param kwargs: forwarded to register_function (e.g. name=, register=,
        usesgeometry=).
    """
    # The decorator only delegates: register_function builds the
    # QgsExpressionFunction wrapper and registers it with QgsExpression.
    def wrapper(func):
        return register_function(func, args, group, **kwargs)
    return wrapper
class QgsEditError(Exception):
    """Raised when committing edits to a layer fails.

    The offending value (typically the layer's commit error list) is kept
    on the instance and rendered via its repr for display.
    """

    def __init__(self, value):
        # Keep the raw value so callers can inspect the commit errors.
        self.value = value

    def __str__(self):
        # %r formatting is equivalent to repr(), shows lists unambiguously.
        return "%r" % (self.value,)
# Define a `with edit(layer)` statement
class edit:
    """Context manager wrapping a layer edit session.

    On clean exit the changes are committed, raising QgsEditError if the
    commit fails; if the body raises, the edits are rolled back and the
    exception propagates.
    """

    def __init__(self, layer):
        self.layer = layer

    def __enter__(self):
        # NOTE(review): assert is stripped under `python -O`, so a failed
        # startEditing() would then go unnoticed — confirm this is acceptable.
        assert self.layer.startEditing()
        return self.layer

    def __exit__(self, ex_type, ex_value, traceback):
        if ex_type is None:
            # Clean exit from the with-block: try to commit.
            if not self.layer.commitChanges():
                raise QgsEditError(self.layer.commitErrors())
            return True
        else:
            # The body raised: discard edits and re-raise (return False).
            self.layer.rollBack()
            return False
| gpl-2.0 |
chjj/bitcoin | qa/rpc-tests/rpcbind_test.py | 17 | 5746 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.
    '''
    # Normalize expected endpoints to the (hex-packed address, port) form
    # produced by netutil.get_bind_addrs.
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind='+addr for addr in addresses]
    # Single node (index 0); connect_to selects the address used to reach it.
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
    try:
        pid = bitcoind_processes[0].pid
        # Compare as sets: bind order is irrelevant.
        assert_equal(set(get_bind_addrs(pid)), set(expected))
    finally:
        stop_nodes(nodes)
        wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with the given -rpcallowip list, and request getinfo
    at a non-localhost IP.
    '''
    base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])
    try:
        # connect to node through non-loopback interface
        url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
        node = AuthServiceProxy(url)
        node.getinfo()
    finally:
        node = None # make sure connection will be garbage collected and closed
        stop_nodes(nodes)
        wait_bitcoinds()
def run_test(tmpdir):
    """Run the full -rpcbind/-rpcallowip combination matrix."""
    assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        # abort with a readable message: `not <non-empty string>` is False
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    defaultport = rpc_port(0)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', defaultport)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', defaultport)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', defaultport)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, defaultport)])
    # Access with a valid rpcallowip should work ...
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
    # ... and with an rpcallowip not matching our address we must be denied
    try:
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    """Parse options, prepare a datadir, run the tests, and clean up."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave bitcoinds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()
    # Prefer the locally built binaries over any installed ones.
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
    check_json_precision()
    success = False
    nodes = []
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # NOTE(review): e.message is Python 2 only; str(e) would be portable.
        print("Assertion failed: "+e.message)
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_bitcoinds()
        shutil.rmtree(options.tmpdir)
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
# Allow running this test file directly.
if __name__ == '__main__':
    main()
| mit |
SB-BISS/RLACOSarsaLambda | Mountain_Car_Grid_Search.py | 1 | 5705 | '''
This is a GRID search to find the best parameters of an algorithm.
In future developments it will be parallelized.
In the Mountain Car Problem, for Sarsa with Tile Coding, we
have the parameters from Sutton and Barto Book
alpha = 0.5 (then divided by num tilings, so it becomes 0.5/0.8, check the implementation of the agents)
decaying factor = 0.96
lambda = 0.96
Discretization = 8,8
'''
import gym
from gym import envs
from agents import TabularSarsaAgent
from agents import ApproximatedSarsaLambdaAgent
from agents import HAApproximatedSarsaLambdaAgent
from static_heuristics.MountainCarHeuristic import MountainCarHeuristic
from agents import StaticHeuristicApproximatedSarsaLambdaAgent
import numpy as np
import matplotlib.pyplot as plt
from gym_maze.envs.maze_env import *
import time
from model.mc_model import mc_model
import pickle
# --- Experiment setup -------------------------------------------------------
# The file mixed Python 2 print statements with print(...) calls; all prints
# are normalized to the single-argument call form, which behaves identically
# under Python 2.
print(envs.registry.all())
env = gym.make("MountainCar-v0")
env._max_episode_steps = 1000  # episode step cap
repetitions = 25  # independent runs averaged per parameter combination
episodes = 20     # learning episodes per run
env.reset()
# Observation bounds (position, velocity) used to scale the tile coding.
obs_mins = env.observation_space.low
obs_maxs = env.observation_space.high
print(obs_mins)
print(obs_maxs)
discretizations = [10, 10]  # tiles per dimension: position and velocity
num_tilings = 10
total_result = []  # one entry per evaluated parameter combination
rend = False  # render or not
# Values for rho
rho_pos = [0.1, 0.3, 0.6, 0.9, 0.99]
# Values for psi, for the heuristic
psi_pos = [0.001, 0.01, 0.1, 0.3, 0.5]
# Values of nu, for the heuristic
nu_pos = [1, 5, 10]
# Values for the discount factor
discount_pos = [1]  # not discounted
# Lambda is fixed, following the Sutton and Barto book (replacing traces).
lmbd = [0.9]
alpha_pos = [0.5]  # becomes 0.5/num_tilings inside the agents
eps_pos = [0.025]  # decaying exploration
# Algorithm / strategy selection for this grid-search run.
algorithms = ["NOH", "SH", "H"]
Strategies = ["Replacing", "TrueOnline"]
algo = algorithms[1]
strat = Strategies[1]
hard_soft = "soft"
model_based = True
z = 0  # counter over parameter combinations
# Exhaustive grid search: one experiment per hyper-parameter combination.
# Each experiment averages `repetitions` independent runs of `episodes`
# learning episodes, recording per-episode steps-to-goal and wall-clock time.
# (Prints normalized to the call form for consistency with the rest of the
# file; single-argument print(...) behaves identically under Python 2.)
for eps in eps_pos:
    for rho in rho_pos:
        for psi in psi_pos:
            for dis in discount_pos:
                for nu in nu_pos:
                    for alpha in alpha_pos:
                        # Agent configuration for this combination.
                        config = {"Strategy": strat,
                                  "Pheromone_strategy": hard_soft,
                                  "decrease_exploration": True,  # Mountain Car uses decaying exploration
                                  "learning_rate": alpha,
                                  "psi": psi,
                                  "rho": rho,
                                  "model": mc_model(),
                                  "static_heuristic": MountainCarHeuristic(model=mc_model(), actions_number=3),
                                  "model_based": model_based,
                                  "eps": eps,  # epsilon in epsilon-greedy policies
                                  "nu": nu,
                                  "lambda": lmbd[0],
                                  "discount": dis,
                                  "n_iter": env._max_episode_steps}
                        times = np.zeros(episodes)    # summed wall-clock time per episode
                        results = np.zeros(episodes)  # summed steps per episode
                        print(z)  # progress: index of the current combination
                        for j in range(repetitions):
                            # Fresh agent per repetition so runs are independent.
                            if algo == "NOH":
                                ag = ApproximatedSarsaLambdaAgent.ApproximatedSarsaLambdaAgent(obs_mins, obs_maxs, env.action_space, discretizations, [num_tilings], my_config=config)
                            elif algo == "SH":
                                ag = StaticHeuristicApproximatedSarsaLambdaAgent.StaticHeuristicApproximatedSarsaLambdaAgent(obs_mins, obs_maxs, env.action_space, discretizations, [num_tilings], my_config=config)
                            else:
                                ag = HAApproximatedSarsaLambdaAgent.HAApproximatedSarsaLambdaAgent(obs_mins, obs_maxs, env.action_space, discretizations, [num_tilings], my_config=config)
                            for i in range(episodes):
                                tb = time.time()
                                ag.learn(env, rend)
                                te = time.time()
                                tdiff = te - tb
                                res = ag.return_last_steps()
                                results[i] = results[i] + res[i]
                                print(res[i])
                                times[i] = times[i] + tdiff
                                print(i)
                        # Average over repetitions; the best combination is the
                        # one with the smallest cumulative_sum.
                        total_result.append({"parameters": [eps, rho, psi, dis, lmbd, alpha, nu],
                                             "times": times / repetitions,
                                             "20thep": results[-1] / repetitions,
                                             "results": results / repetitions,
                                             "cumulative_sum": np.sum(results / repetitions)})
                        z = z + 1
# Save the full grid-search result for offline analysis.
with open("Mountain_car_" + algo + "_" + strat + "_" + hard_soft + "model" + str(model_based) + ".pkl", 'wb') as f:
    pickle.dump(total_result, f)
| mit |
BeegorMif/HTPC-Manager | setup.py | 1 | 8776 | import re
import urllib, ConfigParser
from distutils.core import setup
import py2exe, sys, os, shutil, datetime, zipfile, subprocess, fnmatch
import googlecode_upload
from lib.pygithub import github
# mostly stolen from the SABnzbd package.py file
name = 'SickBeard'
version = '0.1'
release = name + '-' + version
Win32ConsoleName = 'SickBeard-console.exe'
Win32WindowName = 'SickBeard.exe'
def findLatestBuild():
    """Scrape the Google Code downloads page and return the highest win32
    alpha build number found, or None if no download matches."""
    # Build numbers are embedded in the download URLs, e.g.
    # SickBeard-win32-alpha-build123.zip (optionally build123.02.zip).
    regex = "http\://sickbeard\.googlecode\.com/files/SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"
    svnFile = urllib.urlopen("http://code.google.com/p/sickbeard/downloads/list")
    for curLine in svnFile.readlines():
        match = re.search(regex, curLine)
        if match:
            groups = match.groups()
            # The listing shows newest first, so the first hit is the latest.
            return int(groups[0])
    return None
def recursive_find_data_files(root_dir, allowed_extensions=('*',)):
    """Walk *root_dir* and collect files whose extension matches one of
    *allowed_extensions* (fnmatch patterns without the leading ``*.``).

    :param root_dir: directory to walk recursively.
    :param allowed_extensions: iterable of extension patterns; the default
        ``('*',)`` matches any file that has an extension.
    :return: sorted list of (directory, [file paths]) tuples, the shape
        expected by distutils' ``data_files`` option.
    """
    # BUG FIX: the default used to be ('*'), which is just the string '*';
    # it only worked because iterating a one-character string yields the
    # same single pattern. A real tuple makes the intent explicit.
    to_return = {}
    for (dirpath, dirnames, filenames) in os.walk(root_dir):
        if not filenames:
            continue
        for cur_filename in filenames:
            matches_pattern = False
            for cur_pattern in allowed_extensions:
                if fnmatch.fnmatch(cur_filename, '*.' + cur_pattern):
                    matches_pattern = True
                    break  # one matching pattern is enough
            if not matches_pattern:
                continue
            cur_filepath = os.path.join(dirpath, cur_filename)
            to_return.setdefault(dirpath, []).append(cur_filepath)
    return sorted(to_return.items())
def find_all_libraries(root_dirs):
    """Return dotted package names for every package directory (one that
    contains an __init__.py) found under any of *root_dirs*.

    Assumes root_dirs are relative paths, so the os.sep -> '.' translation
    yields the package names distutils expects.
    """
    libs = []
    for cur_root_dir in root_dirs:
        for (dirpath, dirnames, filenames) in os.walk(cur_root_dir):
            if '__init__.py' not in filenames:
                continue
            libs.append(dirpath.replace(os.sep, '.'))
    return libs
def allFiles(dir):
    """Recursively list every file (not directory) under *dir*.

    :param dir: root directory to scan. (Parameter name kept for backward
        compatibility even though it shadows the builtin.)
    :return: list of joined paths, depth-first in os.listdir order.
    """
    files = []
    for entry in os.listdir(dir):
        # Renamed from `file`, which shadowed the Python 2 builtin.
        full_path = os.path.join(dir, entry)
        if os.path.isdir(full_path):
            files += allFiles(full_path)
        else:
            files.append(full_path)
    return files
# save the original arguments and replace them with the py2exe args
oldArgs = []
if len(sys.argv) > 1:
oldArgs = sys.argv[1:]
del sys.argv[1:]
sys.argv.append('py2exe')
# clear the dist dir
if os.path.isdir('dist'):
shutil.rmtree('dist')
# root source dir
compile_dir = os.path.dirname(os.path.normpath(os.path.abspath(sys.argv[0])))
if not 'nopull' in oldArgs:
# pull new source from git
print 'Updating source from git'
p = subprocess.Popen('git pull origin master', shell=True, cwd=compile_dir)
o,e = p.communicate()
# figure out what build this is going to be
latestBuild = findLatestBuild()
if 'test' in oldArgs:
currentBuildNumber = str(latestBuild)+'a'
else:
currentBuildNumber = latestBuild+1
# write the version file before we compile
versionFile = open("sickbeard/version.py", "w")
versionFile.write("SICKBEARD_VERSION = \"build "+str(currentBuildNumber)+"\"")
versionFile.close()
# set up the compilation options
data_files = recursive_find_data_files('data', ['gif', 'png', 'jpg', 'ico', 'js', 'css', 'tmpl'])
options = dict(
name=name,
version=release,
author='Nic Wolfe',
author_email='nic@wolfeden.ca',
description=name + ' ' + release,
scripts=['SickBeard.py'],
packages=find_all_libraries(['sickbeard', 'lib']),
)
# set up py2exe to generate the console app
program = [ {'script': 'SickBeard.py' } ]
options['options'] = {'py2exe':
{
'bundle_files': 3,
'packages': ['Cheetah'],
'excludes': ['Tkconstants', 'Tkinter', 'tcl'],
'optimize': 2,
'compressed': 0
}
}
options['zipfile'] = 'lib/sickbeard.zip'
options['console'] = program
options['data_files'] = data_files
# compile sickbeard-console.exe
setup(**options)
# rename the exe to sickbeard-console.exe
try:
if os.path.exists("dist/%s" % Win32ConsoleName):
os.remove("dist/%s" % Win32ConsoleName)
os.rename("dist/%s" % Win32WindowName, "dist/%s" % Win32ConsoleName)
except:
print "Cannot create dist/%s" % Win32ConsoleName
#sys.exit(1)
# we don't need this stuff when we make the 2nd exe
del options['console']
del options['data_files']
options['windows'] = program
# compile sickbeard.exe
setup(**options)
# compile sabToSickbeard.exe using the existing setup.py script
auto_process_dir = os.path.join(compile_dir, 'autoProcessTV')
p = subprocess.Popen([ sys.executable, os.path.join(auto_process_dir, 'setup.py') ], cwd=auto_process_dir, shell=True)
o,e = p.communicate()
# copy autoProcessTV files to the dist dir
auto_process_files = ['autoProcessTV/sabToSickBeard.py',
'autoProcessTV/hellaToSickBeard.py',
'autoProcessTV/autoProcessTV.py',
'autoProcessTV/autoProcessTV.cfg.sample',
'autoProcessTV/sabToSickBeard.exe']
os.makedirs('dist/autoProcessTV')
for curFile in auto_process_files:
newFile = os.path.join('dist', curFile)
print "Copying file from", curFile, "to", newFile
shutil.copy(curFile, newFile)
# compile updater.exe
setup(
options = {'py2exe': {'bundle_files': 1}},
zipfile = None,
console = ['updater.py'], requires=['Cheetah']
)
if 'test' in oldArgs:
print "Ignoring changelog for test build"
else:
# start building the CHANGELOG.txt
print 'Creating changelog'
gh = github.GitHub()
# read the old changelog and find the last commit from that build
lastCommit = ""
try:
cl = open("CHANGELOG.txt", "r")
lastCommit = cl.readlines()[0].strip()
cl.close()
except:
print "I guess there's no changelog"
newestCommit = ""
changeString = ""
# cycle through all the git commits and save their commit messages
for curCommit in gh.commits.forBranch('midgetspy', 'Sick-Beard'):
if curCommit.id == lastCommit:
break
if newestCommit == "":
newestCommit = curCommit.id
changeString += curCommit.message + "\n\n"
# if we didn't find any changes don't make a changelog file
if newestCommit != "":
newChangelog = open("CHANGELOG.txt", "w")
newChangelog.write(newestCommit+"\n\n")
newChangelog.write("Changelog for build "+str(currentBuildNumber)+"\n\n")
newChangelog.write(changeString)
newChangelog.close()
else:
print "No changes found, keeping old changelog"
# put the changelog in the compile dir
if os.path.exists("CHANGELOG.txt"):
shutil.copy('CHANGELOG.txt', 'dist/')
# figure out what we're going to call the zip file
print 'Zipping files...'
zipFilename = 'SickBeard-win32-alpha-build'+str(currentBuildNumber)
if os.path.isfile(zipFilename + '.zip'):
zipNum = 2
while os.path.isfile(zipFilename + '.{0:0>2}.zip'.format(str(zipNum))):
zipNum += 1
zipFilename = zipFilename + '.{0:0>2}'.format(str(zipNum))
# get a list of files to add to the zip
zipFileList = allFiles('dist/')
# add all files to the zip
z = zipfile.ZipFile(zipFilename + '.zip', 'w', zipfile.ZIP_DEFLATED)
for file in zipFileList:
z.write(file, file.replace('dist/', zipFilename + '/'))
z.close()
print "Created zip at", zipFilename
# leave version file as it is in source
print "Reverting version file to master"
versionFile = open("sickbeard/version.py", "w")
versionFile.write("SICKBEARD_VERSION = \"master\"")
versionFile.close()
# i store my google code username/pw in a config so i can have this file in public source control
config = ConfigParser.ConfigParser()
configFilename = os.path.join(compile_dir, "gc.ini")
config.read(configFilename)
gc_username = config.get("GC", "username")
gc_password = config.get("GC", "password")
# upload to google code unless I tell it not to
if "noup" not in oldArgs and "test" not in oldArgs:
print "Uploading zip to google code"
googlecode_upload.upload(os.path.abspath(zipFilename+".zip"), "sickbeard", gc_username, gc_password, "Win32 alpha build "+str(currentBuildNumber)+" (unstable/development release)", ["Featured","Type-Executable","OpSys-Windows"])
if 'nopush' not in oldArgs and 'test' not in oldArgs:
# tag commit as a new build and push changes to github
print 'Tagging commit and pushing'
p = subprocess.Popen('git tag -a "build-'+str(currentBuildNumber)+'" -m "Windows build '+zipFilename+'"', shell=True, cwd=compile_dir)
o,e = p.communicate()
p = subprocess.Popen('git push --tags origin windows_binaries', shell=True, cwd=compile_dir)
o,e = p.communicate()
| gpl-3.0 |
barraponto/scrapy | tests/mockserver.py | 15 | 6998 | from __future__ import print_function
import sys, time, random, os, json
from six.moves.urllib.parse import urlencode
from subprocess import Popen, PIPE
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor, defer, ssl
from scrapy import twisted_version
from scrapy.utils.python import to_bytes, to_unicode
if twisted_version < (11, 0, 0):
    # Twisted < 11 has no cancellable deferLater; provide a backport that
    # mirrors twisted.internet.task.deferLater, including cancellation.
    def deferLater(clock, delay, func, *args, **kw):
        # Cancelling the returned Deferred cancels the pending timed call
        # (if still active) and errbacks with a generic Exception.
        def _cancel_method():
            _cancel_cb(None)
            d.errback(Exception())
        def _cancel_cb(result):
            if cl.active():
                cl.cancel()
            return result
        d = defer.Deferred()
        d.cancel = _cancel_method
        # Fire func(*args, **kw) once the timed call triggers the callback.
        d.addCallback(lambda ignored: func(*args, **kw))
        d.addBoth(_cancel_cb)
        cl = clock.callLater(delay, d.callback, None)
        return d
else:
    from twisted.internet.task import deferLater
def getarg(request, name, default=None, type=None):
    """Return the first value of query argument *name* from *request*.

    Falls back to *default* when the argument is absent; when *type* is
    given (e.g. int, float), the raw value is converted through it first.
    """
    if name not in request.args:
        return default
    raw = request.args[name][0]
    return raw if type is None else type(raw)
class LeafResource(Resource):
    # Base for leaf pages: no child lookup, render() handles everything.
    isLeaf = True

    def deferRequest(self, request, delay, f, *a, **kw):
        """Schedule f(*a, **kw) after *delay* seconds, cancelling the timer
        if the client disconnects before it fires."""
        def _cancelrequest(_):
            # silence CancelledError
            d.addErrback(lambda _: None)
            d.cancel()
        d = deferLater(reactor, delay, f, *a, **kw)
        request.notifyFinish().addErrback(_cancelrequest)
        return d
class Follow(LeafResource):
    """Page emitting a chain of links, used to exercise link following.

    Query args: total (number of links), show (links per page), order
    (desc|rand), maxlatency (max random response delay in seconds),
    n (current link number).
    """
    def render(self, request):
        total = getarg(request, b"total", 100, type=int)
        show = getarg(request, b"show", 1, type=int)
        order = getarg(request, b"order", b"desc")
        maxlatency = getarg(request, b"maxlatency", 0, type=float)
        n = getarg(request, b"n", total, type=int)
        if order == b"rand":
            nlist = [random.randint(1, total) for _ in range(show)]
        else: # order == "desc"
            nlist = range(n, max(n - show, 0), -1)
        # Delay the response by a random lag up to maxlatency seconds.
        lag = random.random() * maxlatency
        self.deferRequest(request, lag, self.renderRequest, request, nlist)
        return NOT_DONE_YET

    def renderRequest(self, request, nlist):
        # Emit one <a href='/follow?...'> per link number, preserving the
        # remaining query arguments.
        s = """<html> <head></head> <body>"""
        args = request.args.copy()
        for nl in nlist:
            args[b"n"] = [to_bytes(str(nl))]
            argstr = urlencode(args, doseq=True)
            s += "<a href='/follow?%s'>follow %d</a><br>" % (argstr, nl)
        s += """</body>"""
        request.write(to_bytes(s))
        request.finish()
class Delay(LeafResource):
    """Respond after ?n= seconds; with b=1 (the default) send the headers
    immediately and delay only the body."""
    def render_GET(self, request):
        n = getarg(request, b"n", 1, type=float)
        b = getarg(request, b"b", 1, type=int)
        if b:
            # send headers now and delay body
            request.write('')
        self.deferRequest(request, n, self._delayedRender, request, n)
        return NOT_DONE_YET

    def _delayedRender(self, request, n):
        request.write(to_bytes("Response delayed for %0.3f seconds\n" % n))
        request.finish()
class Status(LeafResource):
    """Reply with an empty body and the HTTP status code given by ?n=
    (defaulting to 200)."""

    def render_GET(self, request):
        status_code = getarg(request, b"n", 200, type=int)
        request.setResponseCode(status_code)
        return b""
class Raw(LeafResource):
    """Send the bytes given in ?raw= back verbatim, bypassing twisted's
    normal response generation (useful for malformed responses)."""
    def render_GET(self, request):
        request.startedWriting = 1
        self.deferRequest(request, 0, self._delayedRender, request)
        return NOT_DONE_YET
    render_POST = render_GET

    def _delayedRender(self, request):
        raw = getarg(request, b'raw', b'HTTP 1.1 200 OK\n')
        # Mark the response as started so twisted adds no headers of its
        # own, then write the raw payload and drop the connection.
        request.startedWriting = 1
        request.write(raw)
        request.channel.transport.loseConnection()
        request.finish()
class Echo(LeafResource):
    """Return the request's headers and body as a JSON document."""
    def render_GET(self, request):
        output = {
            'headers': dict(
                (to_unicode(k), [to_unicode(v) for v in vs])
                for k, vs in request.requestHeaders.getAllRawHeaders()),
            'body': to_unicode(request.content.read()),
        }
        return to_bytes(json.dumps(output))
class Partial(LeafResource):
    """Advertise a 1024-byte body but send only a small part of it."""
    def render_GET(self, request):
        request.setHeader(b"Content-Length", b"1024")
        self.deferRequest(request, 0, self._delayedRender, request)
        return NOT_DONE_YET

    def _delayedRender(self, request):
        # Far fewer bytes than the declared Content-Length.
        request.write(b"partial content\n")
        request.finish()
class Drop(Partial):
    """Like Partial, but then close (or with ?abort=1, abort) the
    connection mid-response."""
    def _delayedRender(self, request):
        abort = getarg(request, b"abort", 0, type=int)
        request.write(b"this connection will be dropped\n")
        tr = request.channel.transport
        try:
            if abort and hasattr(tr, 'abortConnection'):
                # abortConnection kills the connection immediately, without
                # flushing pending writes (not available on old twisted).
                tr.abortConnection()
            else:
                tr.loseConnection()
        finally:
            request.finish()
class Root(Resource):
    """URL dispatcher wiring each test endpoint under its path."""
    def __init__(self):
        Resource.__init__(self)
        self.putChild(b"status", Status())
        self.putChild(b"follow", Follow())
        self.putChild(b"delay", Delay())
        self.putChild(b"partial", Partial())
        self.putChild(b"drop", Drop())
        self.putChild(b"raw", Raw())
        self.putChild(b"echo", Echo())
        # The payload endpoints rely on twisted >= 12.3 test helpers.
        if twisted_version > (12, 3, 0):
            from twisted.web.test.test_webclient import PayloadResource
            from twisted.web.server import GzipEncoderFactory
            from twisted.web.resource import EncodingResourceWrapper
            self.putChild(b"payload", PayloadResource())
            self.putChild(b"xpayload", EncodingResourceWrapper(PayloadResource(), [GzipEncoderFactory()]))

    def getChild(self, name, request):
        # Unknown paths fall through to this resource itself.
        return self

    def render(self, request):
        return b'Scrapy mock HTTP server\n'
class MockServer():
    """Context manager that runs this module as a subprocess HTTP(S) server."""
    def __enter__(self):
        from scrapy.utils.test import get_testenv
        self.proc = Popen([sys.executable, '-u', '-m', 'tests.mockserver'],
                          stdout=PIPE, env=get_testenv())
        # Block until the child prints its "listening" line, so the ports
        # are ready before tests connect.
        self.proc.stdout.readline()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.proc.kill()
        self.proc.wait()
        # Give the OS a moment to release the listening sockets.
        time.sleep(0.2)
def ssl_context_factory(keyfile='keys/cert.pem', certfile='keys/cert.pem'):
    """Build an SSL context factory from the test certificate shipped next
    to this module (the same PEM file holds both key and certificate)."""
    return ssl.DefaultOpenSSLContextFactory(
        os.path.join(os.path.dirname(__file__), keyfile),
        os.path.join(os.path.dirname(__file__), certfile),
        )
if __name__ == "__main__":
    # Standalone mode: serve the mock site over HTTP (8998) and HTTPS (8999).
    root = Root()
    factory = Site(root)
    httpPort = reactor.listenTCP(8998, factory)
    contextFactory = ssl_context_factory()
    httpsPort = reactor.listenSSL(8999, factory, contextFactory)
    def print_listening():
        # MockServer.__enter__ waits for this stdout line to know the
        # server is ready to accept connections.
        httpHost = httpPort.getHost()
        httpsHost = httpsPort.getHost()
        print("Mock server running at http://%s:%d and https://%s:%d" % (
            httpHost.host, httpHost.port, httpsHost.host, httpsHost.port))
    reactor.callWhenRunning(print_listening)
    reactor.run()
| bsd-3-clause |
thatchristoph/namebench | nb_third_party/dns/rdtypes/IN/APL.py | 248 | 5525 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.rdata
import dns.tokenizer
class APLItem(object):
    """An APL list item.
    @ivar family: the address family (IANA address family registry)
    @type family: int
    @ivar negation: is this item negated?
    @type negation: bool
    @ivar address: the address
    @type address: string
    @ivar prefix: the prefix length
    @type prefix: int
    """

    __slots__ = ['family', 'negation', 'address', 'prefix']

    def __init__(self, family, negation, address, prefix):
        self.family = family
        self.negation = negation
        self.address = address
        self.prefix = prefix

    def __str__(self):
        # Presentation format per RFC 3123: [!]afi:address/prefix
        if self.negation:
            return "!%d:%s/%s" % (self.family, self.address, self.prefix)
        else:
            return "%d:%s/%s" % (self.family, self.address, self.prefix)

    def to_wire(self, file):
        """Append this item's RFC 3123 wire encoding to *file*."""
        if self.family == 1:
            address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
        elif self.family == 2:
            address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
        else:
            # Unknown family: the address attribute holds hex text.
            address = self.address.decode('hex_codec')
        #
        # Truncate least significant zero bytes.
        #
        last = 0
        for i in xrange(len(address) - 1, -1, -1):
            if address[i] != chr(0):
                last = i + 1
                break
        address = address[0 : last]
        l = len(address)
        assert l < 128
        if self.negation:
            # The negation flag is the high bit of the AFDLength octet.
            l |= 0x80
        header = struct.pack('!HBB', self.family, self.prefix, l)
        file.write(header)
        file.write(address)
class APL(dns.rdata.Rdata):
    """APL record.
    @ivar items: a list of APL items
    @type items: list of APL_Item
    @see: RFC 3123"""

    __slots__ = ['items']

    def __init__(self, rdclass, rdtype, items):
        super(APL, self).__init__(rdclass, rdtype)
        self.items = items

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation form is simply the space-joined items.
        return ' '.join(map(lambda x: str(x), self.items))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse a sequence of [!]family:address/prefix tokens."""
        items = []
        while 1:
            token = tok.get().unescape()
            if token.is_eol_or_eof():
                break
            item = token.value
            if item[0] == '!':
                negation = True
                item = item[1:]
            else:
                negation = False
            (family, rest) = item.split(':', 1)
            family = int(family)
            (address, prefix) = rest.split('/', 1)
            prefix = int(prefix)
            item = APLItem(family, negation, address, prefix)
            items.append(item)
        return cls(rdclass, rdtype, items)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Items are simply concatenated in wire form.
        for item in self.items:
            item.to_wire(file)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Decode APL items from wire format until rdlen bytes are consumed."""
        items = []
        while 1:
            if rdlen < 4:
                raise dns.exception.FormError
            # Fixed item header: family (2 bytes), prefix, AFDLength.
            header = struct.unpack('!HBB', wire[current : current + 4])
            afdlen = header[2]
            if afdlen > 127:
                # The high bit of AFDLength carries the negation flag.
                negation = True
                afdlen -= 128
            else:
                negation = False
            current += 4
            rdlen -= 4
            if rdlen < afdlen:
                raise dns.exception.FormError
            address = wire[current : current + afdlen]
            l = len(address)
            if header[0] == 1:
                # IPv4: restore the zero bytes truncated on encode.
                if l < 4:
                    address += '\x00' * (4 - l)
                address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
            elif header[0] == 2:
                # IPv6: restore the zero bytes truncated on encode.
                if l < 16:
                    address += '\x00' * (16 - l)
                address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
            else:
                #
                # This isn't really right according to the RFC, but it
                # seems better than throwing an exception
                #
                address = address.encode('hex_codec')
            current += afdlen
            rdlen -= afdlen
            item = APLItem(header[0], negation, address, header[1])
            items.append(item)
            if rdlen == 0:
                break
        return cls(rdclass, rdtype, items)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare by wire encoding, giving dnspython's canonical ordering.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| apache-2.0 |
evansd/whitenoise | docs/conf.py | 1 | 8679 | # flake8: noqa
# -*- coding: utf-8 -*-
#
# WhiteNoise documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 11 15:22:49 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
def get_version():
    """Read whitenoise's __version__ string out of the package __init__.py."""
    import ast, os, re

    init_path = os.path.join(
        os.path.dirname(__file__), "../whitenoise/__init__.py"
    )
    with open(init_path, "rb") as source:
        text = source.read().decode("utf-8")
    # The assignment is a Python literal, so literal_eval unquotes it safely.
    match = re.search(r"__version__\s+=\s+(.*)", text)
    return str(ast.literal_eval(match.group(1)))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.extlinks"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"WhiteNoise"
copyright = u"2013-{}, David Evans".format(datetime.datetime.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# On Read the Docs the build environment injects its own theme setup, so use
# the builtin "default" theme there; locally, fall back to sphinx_rtd_theme.
if os.environ.get("READTHEDOCS", None) == "True":
    html_theme = "default"
else:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "WhiteNoisedoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "WhiteNoise.tex", u"WhiteNoise Documentation", u"David Evans", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "whitenoise", u"WhiteNoise Documentation", [u"David Evans"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"WhiteNoise",
u"WhiteNoise Documentation",
u"David Evans",
"WhiteNoise",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Make :file:`path` roles link to the matching blob on GitHub, pinned to the
# release tag for released versions and to master for development builds.
git_tag = "v{}".format(version) if version != "development" else "master"
github_base_url = "https://github.com/evansd/whitenoise/blob/{}/".format(git_tag)
# NOTE(review): Sphinx >= 4 expects None rather than "" for the extlinks
# caption — confirm against the Sphinx version this project pins.
extlinks = {"file": (github_base_url + "%s", "")}
| mit |
midori1/midorinoblog | site-packages/django/core/cache/backends/db.py | 80 | 8853 | "Database cache backend."
import base64
from datetime import datetime
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
from django.db import connections, transaction, router, DatabaseError
from django.db.backends.utils import typecast_timestamp
from django.utils import timezone, six
from django.utils.encoding import force_bytes
class Options(object):
    """A stand-in that quacks like a Django model ``_meta`` options object.

    Cache entries are not real models, but database routers introspect
    ``_meta`` attributes when deciding which database should receive cache
    reads and writes; this supplies the minimal metadata they look at.
    """
    def __init__(self, table):
        meta = {
            'db_table': table,
            'app_label': 'django_cache',
            'model_name': 'cacheentry',
            'verbose_name': 'cache entry',
            'verbose_name_plural': 'cache entries',
            'object_name': 'CacheEntry',
            'abstract': False,
            'managed': True,
            'proxy': False,
        }
        for attr, value in meta.items():
            setattr(self, attr, value)
class BaseDatabaseCache(BaseCache):
    """Base class for database cache backends.

    Stores the cache table name and builds a minimal fake model class
    (``cache_model_class``) whose ``_meta`` lets database routers decide
    where cache queries should be sent.
    """
    def __init__(self, table, params):
        BaseCache.__init__(self, params)
        self._table = table
        # Fake "model": routers only ever inspect its ``_meta`` attribute.
        class CacheEntry(object):
            _meta = Options(table)
        self.cache_model_class = CacheEntry
class DatabaseCache(BaseDatabaseCache):
    """Cache backend that stores entries as rows in a database table."""
    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes depending on the
    # value of USE_TZ. They must be compared to aware or naive representations
    # of "now" respectively.
    # But it bypasses the ORM for write operations. As a consequence, aware
    # datetimes aren't made naive for databases that don't support time zones.
    # We work around this problem by always using naive datetimes when writing
    # expiration values, in UTC when USE_TZ = True and in local time otherwise.
    def get(self, key, default=None, version=None):
        """Fetch ``key``, returning ``default`` on a miss.

        Expired rows are deleted lazily here, on read.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute("SELECT cache_key, value, expires FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            row = cursor.fetchone()
        if row is None:
            return default
        now = timezone.now()
        expires = row[2]
        if connections[db].features.needs_datetime_string_cast and not isinstance(expires, datetime):
            # Note: typecasting is needed by some 3rd party database backends.
            # All core backends work without typecasting, so be careful about
            # changes here - test suite will NOT pick regressions here.
            expires = typecast_timestamp(str(expires))
        if expires < now:
            # Entry has expired: delete it (using the write alias) and miss.
            db = router.db_for_write(self.cache_model_class)
            with connections[db].cursor() as cursor:
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key = %%s" % table, [key])
            return default
        value = connections[db].ops.process_clob(row[1])
        # Values are stored base64-encoded pickles.
        return pickle.loads(base64.b64decode(force_bytes(value)))
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Unconditionally store ``value`` under ``key``."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """Store ``value`` only if ``key`` is absent or expired.

        Returns True on success, False otherwise.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)
    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        """Shared implementation for set() and add(); ``mode`` selects which.

        Returns False if the write failed (swallowed DatabaseError), True
        otherwise.
        """
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            # Expiration timestamps are always written as naive datetimes:
            # UTC when USE_TZ is on, local time otherwise (see class comment).
            if timeout is None:
                exp = datetime.max
            elif settings.USE_TZ:
                exp = datetime.utcfromtimestamp(timeout)
            else:
                exp = datetime.fromtimestamp(timeout)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now)
            pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
            b64encoded = base64.b64encode(pickled)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            if six.PY3:
                b64encoded = b64encoded.decode('latin1')
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute("SELECT cache_key, expires FROM %s "
                                   "WHERE cache_key = %%s" % table, [key])
                    result = cursor.fetchone()
                    if result:
                        current_expires = result[1]
                        if (connections[db].features.needs_datetime_string_cast and not
                                isinstance(current_expires, datetime)):
                            current_expires = typecast_timestamp(str(current_expires))
                    exp = connections[db].ops.value_to_db_datetime(exp)
                    # 'set' always overwrites; 'add' only replaces expired rows.
                    if result and (mode == 'set' or (mode == 'add' and current_expires < now)):
                        cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                                       "WHERE cache_key = %%s" % table,
                                       [b64encoded, exp, key])
                    else:
                        cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                                       "VALUES (%%s, %%s, %%s)" % table,
                                       [key, b64encoded, exp])
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True
    def delete(self, key, version=None):
        """Remove ``key`` from the cache table (no-op if it doesn't exist)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
    def has_key(self, key, version=None):
        """Return True if ``key`` exists and has not yet expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        # Compare against naive "now" to match how expirations are written.
        if settings.USE_TZ:
            now = datetime.utcnow()
        else:
            now = datetime.now()
        now = now.replace(microsecond=0)
        with connections[db].cursor() as cursor:
            cursor.execute("SELECT cache_key FROM %s "
                           "WHERE cache_key = %%s and expires > %%s" % table,
                           [key, connections[db].ops.value_to_db_datetime(now)])
            return cursor.fetchone() is not None
    def _cull(self, db, cursor, now):
        """Trim the table when it grows past ``_max_entries``.

        First drops expired rows; if still over the limit, deletes roughly
        1/_cull_frequency of all keys (or everything when frequency is 0).
        """
        if self._cull_frequency == 0:
            self.clear()
        else:
            # When USE_TZ is True, 'now' will be an aware datetime in UTC.
            now = now.replace(tzinfo=None)
            table = connections[db].ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connections[db].ops.value_to_db_datetime(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                cull_num = num // self._cull_frequency
                # Backend-specific SQL picks the cache_key cutoff value.
                cursor.execute(
                    connections[db].ops.cache_key_culling_sql() % table,
                    [cull_num])
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key < %%s" % table,
                               [cursor.fetchone()[0]])
    def clear(self):
        """Delete every row in the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        with connections[db].cursor() as cursor:
            cursor.execute('DELETE FROM %s' % table)
# For backwards compatibility
class CacheClass(DatabaseCache):
    """Deprecated alias so old settings referring to ``CacheClass`` keep working."""
    pass
| apache-2.0 |
blackbliss/callme | flask/lib/python2.7/site-packages/setuptools/sandbox.py | 221 | 9994 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import pkg_resources
if os.name == "java":
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools.compat import builtins, execfile
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    # Snapshot process-global state (cwd, argv, sys.path, tempdir, loaded
    # modules, pkg_resources state) so it can all be restored afterwards.
    old_dir = os.getcwd()
    save_argv = sys.argv[:]
    save_path = sys.path[:]
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    temp_dir = os.path.join(setup_dir,'temp')
    if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
    save_tmp = tempfile.tempdir
    save_modules = sys.modules.copy()
    pr_state = pkg_resources.__getstate__()
    try:
        # Redirect temp files and cwd into the setup script's own directory.
        tempfile.tempdir = temp_dir
        os.chdir(setup_dir)
        try:
            sys.argv[:] = [setup_script]+list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist:dist.activate())
            # Execute the script under a DirectorySandbox so it cannot write
            # outside its own directory tree.
            DirectorySandbox(setup_dir).run(
                lambda: execfile(
                    "setup.py",
                    {'__file__':setup_script, '__name__':'__main__'}
                )
            )
        except SystemExit:
            v = sys.exc_info()[1]
            # Re-raise only if the script exited with a truthy status.
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
    finally:
        # Restore all the snapshotted global state.
        pkg_resources.__setstate__(pr_state)
        sys.modules.update(save_modules)
        # remove any modules imported within the sandbox
        del_modules = [
            mod_name for mod_name in sys.modules
            if mod_name not in save_modules
            # exclude any encodings modules. See #285
            and not mod_name.startswith('encodings.')
        ]
        list(map(sys.modules.__delitem__, del_modules))
        os.chdir(old_dir)
        sys.path[:] = save_path
        sys.argv[:] = save_argv
        tempfile.tempdir = save_tmp
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
    # Whether the sandbox is currently installed into os/builtins.
    _active = False
    def __init__(self):
        # Every public os attribute that this instance also defines will be
        # swapped into the real os module while the sandbox is running.
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self,name)
        ]
    def _copy(self, source):
        # Point os.<name> at source.<name> for every wrapped attribute; called
        # with self to install the sandbox and with _os to restore it.
        for name in self._attrs:
            setattr(os, name, getattr(source,name))
    def run(self, func):
        """Run 'func' under os sandboxing"""
        try:
            self._copy(self)
            if _file:
                builtins.file = self._file
            builtins.open = self._open
            self._active = True
            return func()
        finally:
            # Always restore the pristine os module and builtins.
            self._active = False
            if _file:
                builtins.file = _file
            builtins.open = _open
            self._copy(_os)
    # NOTE: the following are wrapper *factories* executed at class-body time;
    # the `for name in [...]` loops below inject the generated wrappers into
    # the class namespace via locals().
    def _mk_dual_path_wrapper(name):
        # Wrap a two-path os operation (rename/link/symlink): remap both
        # paths while the sandbox is active.
        original = getattr(_os,name)
        def wrap(self,src,dst,*args,**kw):
            if self._active:
                src,dst = self._remap_pair(name,src,dst,*args,**kw)
            return original(src,dst,*args,**kw)
        return wrap
    for name in ["rename", "link", "symlink"]:
        if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
    def _mk_single_path_wrapper(name, original=None):
        # Wrap a single-path operation: remap the path while active.
        original = original or getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
            return original(path,*args,**kw)
        return wrap
    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)
    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
    def _mk_single_with_return(name):
        # Wrap an operation whose input *and* return value are paths
        # (e.g. readlink): remap both directions while active.
        original = getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
                return self._remap_output(name, original(path,*args,**kw))
            return original(path,*args,**kw)
        return wrap
    for name in ['readlink', 'tempnam']:
        if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
    def _mk_query(name):
        # Wrap an operation that takes no path but returns one (getcwd).
        original = getattr(_os,name)
        def wrap(self,*args,**kw):
            retval = original(*args,**kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval
        return wrap
    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os,name): locals()[name] = _mk_query(name)
    def _validate_path(self,path):
        """Called to remap or validate any path, whether input or output"""
        return path
    def _remap_input(self,operation,path,*args,**kw):
        """Called for path inputs"""
        return self._validate_path(path)
    def _remap_output(self,operation,path):
        """Called for path outputs"""
        return self._validate_path(path)
    def _remap_pair(self,operation,src,dst,*args,**kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation+'-from',src,*args,**kw),
            self._remap_input(operation+'-to',dst,*args,**kw)
        )
# Paths always exempt from sandboxing: the null device, plus pywin32's COM
# gen_py cache directory when pywin32 is installed.
if hasattr(os, 'devnull'):
    _EXCEPTIONS = [os.devnull,]
else:
    _EXCEPTIONS = []
try:
    from win32com.client.gencache import GetGeneratePath
    _EXCEPTIONS.append(GetGeneratePath())
    del GetGeneratePath
except ImportError:
    # it appears pywin32 is not installed, so no need to exclude.
    pass
class DirectorySandbox(AbstractSandbox):
    """Restrict operations to a single subdirectory - pseudo-chroot"""
    # Operations that can write to the filesystem and must therefore be
    # confined to the sandbox directory.
    write_ops = dict.fromkeys([
        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
    ])
    # Writes to paths matching any of these regexes are exempt from the
    # sandbox check. Raw string avoids the invalid "\." escape sequence
    # (a DeprecationWarning today, a SyntaxError in future CPython).
    _exception_patterns = [
        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
        r'.*lib2to3.*\.pickle$',
    ]
    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
        self._prefix = os.path.join(self._sandbox,'')
        self._exceptions = [
            os.path.normcase(os.path.realpath(path))
            for path in exceptions
        ]
        AbstractSandbox.__init__(self)
    def _violation(self, operation, *args, **kw):
        """Raise SandboxViolation describing the disallowed *operation*."""
        raise SandboxViolation(operation, args, kw)
    if _file:
        def _file(self, path, mode='r', *args, **kw):
            # Only write modes need to be inside the sandbox.
            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
                self._violation("file", path, mode, *args, **kw)
            return _file(path,mode,*args,**kw)
    def _open(self, path, mode='r', *args, **kw):
        # Only write modes need to be inside the sandbox.
        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
            self._violation("open", path, mode, *args, **kw)
        return _open(path,mode,*args,**kw)
    def tmpnam(self):
        # tmpnam creates a file at an uncontrollable path; always forbidden.
        self._violation("tmpnam")
    def _ok(self, path):
        """Return True if *path* resolves inside the sandbox or is exempt."""
        active = self._active
        try:
            # Deactivate while resolving, so os.path.realpath doesn't recurse
            # back into our own wrappers.
            self._active = False
            realpath = os.path.normcase(os.path.realpath(path))
            return (
                self._exempted(realpath)
                or realpath == self._sandbox
                or realpath.startswith(self._prefix)
            )
        finally:
            self._active = active
    def _exempted(self, filepath):
        """True if *filepath* matches an exception prefix or pattern."""
        start_matches = (
            filepath.startswith(exception)
            for exception in self._exceptions
        )
        pattern_matches = (
            re.match(pattern, filepath)
            for pattern in self._exception_patterns
        )
        candidates = itertools.chain(start_matches, pattern_matches)
        return any(candidates)
    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        if operation in self.write_ops and not self._ok(path):
            self._violation(operation, os.path.realpath(path), *args, **kw)
        return path
    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        if not self._ok(src) or not self._ok(dst):
            self._violation(operation, src, dst, *args, **kw)
        return (src,dst)
    def open(self, file, flags, mode=0x1FF, *args, **kw):  # 0777
        """Called for low-level os.open()"""
        if flags & WRITE_FLAGS and not self._ok(file):
            self._violation("os.open", file, flags, mode, *args, **kw)
        return _os.open(file,flags,mode, *args, **kw)
# Bitmask of os.open flags that imply writing; flags missing on this platform
# contribute 0 via the getattr default.
WRITE_FLAGS = functools.reduce(
    operator.or_, [getattr(_os, a, 0) for a in
        "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
    """A setup script attempted to modify the filesystem outside the sandbox"""
    def __str__(self):
        # self.args is (operation, args, kwargs) as raised by
        # DirectorySandbox._violation().
        return """SandboxViolation: %s%r %s

The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.

This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand.  Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| mit |
LethusTI/supportcenter | vendor/django/django/views/generic/dates.py | 80 | 19895 | import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
class YearMixin(object):
    """Mixin that resolves the *year* component of a dated view.

    The year is looked up, in order of precedence, on the ``year``
    attribute, then in the URLconf keyword arguments, then in the GET
    parameters; a missing year raises Http404.
    """
    year_format = '%Y'
    year = None

    def get_year_format(self):
        """Return the strptime format used to parse the year from the URL."""
        return self.year_format

    def get_year(self):
        """Return the year for which this view should display data."""
        if self.year is not None:
            return self.year
        try:
            return self.kwargs['year']
        except KeyError:
            pass
        try:
            return self.request.GET['year']
        except KeyError:
            raise Http404(_(u"No year specified"))
class MonthMixin(object):
    """Mixin that resolves the *month* component of a dated view.

    The month comes from the ``month`` attribute, the URLconf kwargs, or
    the GET parameters, in that order; a missing month raises Http404.
    """
    month_format = '%b'
    month = None

    def get_month_format(self):
        """Return the strptime format used to parse the month from the URL."""
        return self.month_format

    def get_month(self):
        """Return the month for which this view should display data."""
        if self.month is not None:
            return self.month
        try:
            return self.kwargs['month']
        except KeyError:
            pass
        try:
            return self.request.GET['month']
        except KeyError:
            raise Http404(_(u"No month specified"))

    def get_next_month(self, date):
        """Return the next valid month after *date*, honouring view limits."""
        _, upper = _month_bounds(date)
        candidate = (upper + datetime.timedelta(days=1)).replace(day=1)
        return _get_next_prev_month(self, candidate, is_previous=False, use_first_day=True)

    def get_previous_month(self, date):
        """Return the previous valid month before *date*, honouring view limits."""
        lower, _ = _month_bounds(date)
        candidate = lower - datetime.timedelta(days=1)
        return _get_next_prev_month(self, candidate, is_previous=True, use_first_day=True)
class DayMixin(object):
    """Mixin that resolves the *day* component of a dated view.

    The day comes from the ``day`` attribute, the URLconf kwargs, or the
    GET parameters, in that order; a missing day raises Http404.
    """
    day_format = '%d'
    day = None

    def get_day_format(self):
        """Return the strptime format used to parse the day from the URL."""
        return self.day_format

    def get_day(self):
        """Return the day for which this view should display data."""
        if self.day is not None:
            return self.day
        try:
            return self.kwargs['day']
        except KeyError:
            pass
        try:
            return self.request.GET['day']
        except KeyError:
            raise Http404(_(u"No day specified"))

    def get_next_day(self, date):
        """Return the next valid day after *date*, honouring view limits."""
        candidate = date + datetime.timedelta(days=1)
        return _get_next_prev_month(self, candidate, is_previous=False, use_first_day=False)

    def get_previous_day(self, date):
        """Return the previous valid day before *date*, honouring view limits."""
        candidate = date - datetime.timedelta(days=1)
        return _get_next_prev_month(self, candidate, is_previous=True, use_first_day=False)
class WeekMixin(object):
    """Mixin that resolves the *week* component of a dated view.

    The week comes from the ``week`` attribute, the URLconf kwargs, or the
    GET parameters, in that order; a missing week raises Http404.
    """
    week_format = '%U'
    week = None

    def get_week_format(self):
        """Return the strptime format used to parse the week from the URL."""
        return self.week_format

    def get_week(self):
        """Return the week for which this view should display data."""
        if self.week is not None:
            return self.week
        try:
            return self.kwargs['week']
        except KeyError:
            pass
        try:
            return self.request.GET['week']
        except KeyError:
            raise Http404(_(u"No week specified"))
class DateMixin(object):
    """
    Mixin class for views manipulating date-based data.
    """
    date_field = None
    allow_future = False

    def get_date_field(self):
        """Return the name of the model field used for date filtering."""
        field_name = self.date_field
        if field_name is None:
            raise ImproperlyConfigured(u"%s.date_field is required." % self.__class__.__name__)
        return field_name

    def get_allow_future(self):
        """Return True when objects dated in the future may be displayed."""
        return self.allow_future
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
    """
    Abstract base class for date-based views displaying a list of objects.
    """
    allow_empty = False
    def get(self, request, *args, **kwargs):
        # Delegate to get_dated_items(), which subclasses must implement.
        self.date_list, self.object_list, extra_context = self.get_dated_items()
        context = self.get_context_data(object_list=self.object_list,
                                        date_list=self.date_list)
        context.update(extra_context)
        return self.render_to_response(context)
    def get_dated_items(self):
        """
        Obtain the list of dates and items.
        """
        raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
    def get_dated_queryset(self, **lookup):
        """
        Get a queryset properly filtered according to `allow_future` and any
        extra lookup kwargs.
        """
        qs = self.get_queryset().filter(**lookup)
        date_field = self.get_date_field()
        allow_future = self.get_allow_future()
        allow_empty = self.get_allow_empty()
        if not allow_future:
            # Hide objects dated after "now" unless explicitly allowed.
            qs = qs.filter(**{'%s__lte' % date_field: timezone.now()})
        if not allow_empty and not qs:
            raise Http404(_(u"No %(verbose_name_plural)s available") % {
                'verbose_name_plural': force_unicode(qs.model._meta.verbose_name_plural)
            })
        return qs
    def get_date_list(self, queryset, date_type):
        """
        Get a date list by calling `queryset.dates()`, checking along the way
        for empty lists that aren't allowed.
        """
        date_field = self.get_date_field()
        allow_empty = self.get_allow_empty()
        # Reversed: most recent date first.
        date_list = queryset.dates(date_field, date_type)[::-1]
        if date_list is not None and not date_list and not allow_empty:
            name = force_unicode(queryset.model._meta.verbose_name_plural)
            raise Http404(_(u"No %(verbose_name_plural)s available") %
                          {'verbose_name_plural': name})
        return date_list
    def get_context_data(self, **kwargs):
        """
        Get the context. Must return a Context (or subclass) instance.
        """
        items = kwargs.pop('object_list')
        context = super(BaseDateListView, self).get_context_data(object_list=items)
        context.update(kwargs)
        return context
class BaseArchiveIndexView(BaseDateListView):
    """
    Base class for archives of date-based items.

    Requires a response mixin.
    """
    context_object_name = 'latest'

    def get_dated_items(self):
        """Return (date_list, items, extra_context) for this request."""
        qs = self.get_dated_queryset()
        date_list = self.get_date_list(qs, 'year')
        # An empty date list means there are no objects at all; keep the
        # result a queryset so callers can still introspect the model.
        object_list = qs.order_by('-' + self.get_date_field()) if date_list else qs.none()
        return (date_list, object_list, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    """
    Top-level archive of date-based items.

    Renders with the ``<app>/<model>_archive.html`` template by default.
    """
    template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
    """
    List of objects published in a given year.
    """
    # When False, only the month date_list is produced, not the objects.
    make_object_list = False
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        # Yes, no error checking: the URLpattern ought to validate this; it's
        # an error if it doesn't.
        year = self.get_year()
        date_field = self.get_date_field()
        qs = self.get_dated_queryset(**{date_field+'__year': year})
        date_list = self.get_date_list(qs, 'month')
        if self.get_make_object_list():
            object_list = qs.order_by('-'+date_field)
        else:
            # We need this to be a queryset since parent classes introspect it
            # to find information about the model.
            object_list = qs.none()
        return (date_list, object_list, {'year': year})
    def get_make_object_list(self):
        """
        Return `True` if this view should contain the full list of objects in
        the given year.
        """
        return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    """
    List of objects published in a given year.

    Renders with the ``<app>/<model>_archive_year.html`` template by default.
    """
    template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
    """
    List of objects published in a given month.
    """
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        month = self.get_month()
        date_field = self.get_date_field()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format())
        # Construct a date-range lookup.
        first_day, last_day = _month_bounds(date)
        lookup_kwargs = {
            '%s__gte' % date_field: first_day,
            '%s__lt' % date_field: last_day,
        }
        qs = self.get_dated_queryset(**lookup_kwargs)
        date_list = self.get_date_list(qs, 'day')
        return (date_list, qs, {
            'month': date,
            'next_month': self.get_next_month(date),
            'previous_month': self.get_previous_month(date),
        })
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    """
    List of objects published in a given month.

    Renders with the ``<app>/<model>_archive_month.html`` template by default.
    """
    template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
    """
    List of objects published in a given week.
    """
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        week = self.get_week()
        date_field = self.get_date_field()
        week_format = self.get_week_format()
        # strptime weekday anchor: '%W' weeks start on Monday (day 1),
        # '%U' weeks start on Sunday (day 0).
        week_start = {
            '%W': '1',
            '%U': '0',
        }[week_format]
        date = _date_from_string(year, self.get_year_format(),
                                 week_start, '%w',
                                 week, week_format)
        # Construct a date-range lookup.
        first_day = date
        last_day = date + datetime.timedelta(days=7)
        lookup_kwargs = {
            '%s__gte' % date_field: first_day,
            '%s__lt' % date_field: last_day,
        }
        qs = self.get_dated_queryset(**lookup_kwargs)
        return (None, qs, {'week': date})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    """
    List of objects published in a given week.

    Renders with the ``<app>/<model>_archive_week.html`` template by default.
    """
    template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
    """
    List of objects published on a given day.
    """
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())
        return self._get_dated_items(date)
    def _get_dated_items(self, date):
        """
        Do the actual heavy lifting of getting the dated items; this accepts a
        date object so that TodayArchiveView can be trivial.
        """
        date_field = self.get_date_field()
        field = self.get_queryset().model._meta.get_field(date_field)
        lookup_kwargs = _date_lookup_for_field(field, date)
        qs = self.get_dated_queryset(**lookup_kwargs)
        return (None, qs, {
            'day': date,
            'previous_day': self.get_previous_day(date),
            'next_day': self.get_next_day(date),
            'previous_month': self.get_previous_month(date),
            'next_month': self.get_next_month(date)
        })
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    """
    List of objects published on a given day.

    Renders with the ``<app>/<model>_archive_day.html`` template by default.
    """
    template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
    """
    List of objects published today.
    """
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        # Same as a day archive, with "today" substituted for the URL date.
        return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    """
    List of objects published today.

    Reuses the day-archive template (``<app>/<model>_archive_day.html``).
    """
    template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    def get_object(self, queryset=None):
        """
        Get the object this request displays.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())
        # Use a custom queryset if provided
        qs = queryset or self.get_queryset()
        if not self.get_allow_future() and date > datetime.date.today():
            raise Http404(_(u"Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % {
                'verbose_name_plural': qs.model._meta.verbose_name_plural,
                'class_name': self.__class__.__name__,
            })
        # Filter down a queryset from self.queryset using the date from the
        # URL. This'll get passed as the queryset to DetailView.get_object,
        # which'll handle the 404
        date_field = self.get_date_field()
        field = qs.model._meta.get_field(date_field)
        lookup = _date_lookup_for_field(field, date)
        qs = qs.filter(**lookup)
        # NOTE(review): super(BaseDetailView, self) deliberately skips
        # BaseDetailView in the MRO (calling SingleObjectMixin.get_object
        # directly, presumably) — confirm before changing.
        return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    """
    Concrete date-based detail view: a single object looked up by
    year/month/day in the URL, rendered with the ``_detail`` template suffix.
    """
    template_name_suffix = '_detail'
def _date_from_string(year, year_format, month, month_format, day='', day_format='', delim='__'):
    """
    Helper: parse the year/month/day strings, interpreted with their
    respective strptime formats, into a datetime.date. Raises Http404 when
    the pieces don't form a valid date.
    """
    fmt = delim.join((year_format, month_format, day_format))
    datestr = delim.join((year, month, day))
    try:
        return datetime.datetime.strptime(datestr, fmt).date()
    except ValueError:
        raise Http404(_(u"Invalid date string '%(datestr)s' given format '%(format)s'") % {
            'datestr': datestr,
            'format': fmt,
        })
def _month_bounds(date):
    """
    Helper: return (start, end) for the month containing *date*, where
    ``start`` is the first day of that month and ``end`` is the first day of
    the following month (i.e. an exclusive upper bound).
    """
    start = date.replace(day=1)
    if start.month == 12:
        end = start.replace(year=start.year + 1, month=1)
    else:
        end = start.replace(month=start.month + 1)
    return start, end
def _get_next_prev_month(generic_view, naive_result, is_previous, use_first_day):
    """
    Helper: Get the next or the previous valid date. The idea is to allow
    links on month/day views to never be 404s by never providing a date
    that'll be invalid for the given view.

    This is a bit complicated since it handles both next and previous months
    and days (for MonthArchiveView and DayArchiveView); hence the coupling to generic_view.

    However in essence the logic comes down to:

    * If allow_empty and allow_future are both true, this is easy: just
      return the naive result (just the next/previous day or month,
      regardless of object existence.)

    * If allow_empty is true, allow_future is false, and the naive month
      isn't in the future, then return it; otherwise return None.

    * If allow_empty is false and allow_future is true, return the next
      date *that contains a valid object*, even if it's in the future. If
      there are no next objects, return None.

    * If allow_empty is false and allow_future is false, return the next
      date that contains a valid object. If that date is in the future, or
      if there are no next objects, return None.
    """
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()
    # If allow_empty is True the naive value will be valid
    if allow_empty:
        result = naive_result
    # Otherwise, we'll need to go to the database to look for an object
    # whose date_field is at least (greater than/less than) the given
    # naive result
    else:
        # Construct a lookup and an ordering depending on whether we're doing
        # a previous date or a next date lookup.
        if is_previous:
            lookup = {'%s__lte' % date_field: naive_result}
            ordering = '-%s' % date_field
        else:
            lookup = {'%s__gte' % date_field: naive_result}
            ordering = date_field
        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
        # Snag the first object from the queryset; if it doesn't exist that
        # means there's no next/previous link available.
        try:
            result = getattr(qs[0], date_field)
        except IndexError:
            result = None
    # Convert datetimes to a dates
    if hasattr(result, 'date'):
        result = result.date()
    # For month views, we always want to have a date that's the first of the
    # month for consistency's sake.
    if result and use_first_day:
        result = result.replace(day=1)
    # Check against future dates.
    # NOTE(review): the strict '<' excludes today's date itself when
    # allow_future is False -- confirm whether today should count as future.
    if result and (allow_future or result < datetime.date.today()):
        return result
    else:
        return None
def _date_lookup_for_field(field, date):
    """
    Build the filter() kwargs matching *date* against *field*. A plain date
    field can be compared directly, but a DateTimeField needs a __range
    lookup spanning the whole day, because an exact match would also compare
    the time portion.
    """
    if not isinstance(field, models.DateTimeField):
        return {field.name: date}
    day_start = datetime.datetime.combine(date, datetime.time.min)
    day_end = datetime.datetime.combine(date, datetime.time.max)
    return {'%s__range' % field.name: (day_start, day_end)}
| gpl-3.0 |
hainm/pythran | pythran/tests/cases/emin.py | 5 | 1559 | #from https://gist.github.com/andersx/6061586
#runas run()
#bench run()
#pythran export run()
# A simple energy minimization program that uses steepest descent
# and a force field to minimize the energy of water in internal coordinates.
# Written by Jan H. Jensen, 2013
def Eandg(rOH, thetaHOH):
    """
    Force-field energy and gradients of water in internal coordinates.

    Arguments: (internal coordinates of the water molecule)
    rOH          O-H bond distance
    thetaHOH     H-O-H bond angle

    Returns:
    E            Molecular force field energy
    grOH         O-H bond stretch gradient
    grthetaHOH   H-O-H bond angle bend gradient

    Force field parameters:
    kOH          Harmonic force constant, O-H bond stretch
    rOHe         Equilibrium distance, O-H
    kHOH         Harmonic angle bend force constant, H-O-H angle bend
    thetaHOHe    Equilibrium angle, H-O-H
    """
    # Harmonic force-field parameters.
    kOH = 50.0
    rOHe = 0.95
    kHOH = 50.0
    thetaHOHe = 104.5
    # Energy: stretch term (with its factor 2) plus angle-bend term.
    E = 2 * kOH * (rOH - rOHe)**2 + kHOH * (thetaHOH - thetaHOHe)**2
    # Gradient terms used by the steepest-descent driver.
    # NOTE(review): E's stretch term carries a factor 2 that grOH does not,
    # so grOH is not the analytic derivative of E -- confirm intended.
    grOH = 2 * kOH * (rOH - rOHe)
    grthetaHOH = 2 * kHOH * (thetaHOH - thetaHOHe)
    return (E, grOH, grthetaHOH)
def run():
    """
    Minimize the water force-field energy by fixed-step steepest descent.

    Returns (converged, E, rOH, thetaHOH). Note that ``converged`` holds the
    same expression as the loop's update condition: it is True when a
    gradient still exceeds its tolerance, i.e. when the search has NOT
    converged.
    """
    # c is the descent step size; the tolerances below scale as 0.001/c
    # and 0.01/c.
    c = 0.005
    n_steps = 1000000
    #starting geometry
    rOH = 10.0
    thetaHOH = 180.0
    # The loop always runs the full n_steps (there is no early break); it
    # merely stops updating the coordinates once both gradients are within
    # tolerance.
    for i in range(n_steps):
        (E,grOH,gthetaHOH) = Eandg(rOH,thetaHOH)
        if (abs(grOH) >0.001/c or abs(gthetaHOH) > 0.01/c ):
            rOH = rOH - c*grOH
            thetaHOH = thetaHOH - c*gthetaHOH
    converged = (abs(grOH) >0.001/c or abs(gthetaHOH) > 0.01/c )
    return converged, E,rOH,thetaHOH
| bsd-3-clause |
dincamihai/django-allauth | allauth/socialaccount/providers/amazon/views.py | 73 | 1304 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import AmazonProvider
class AmazonOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter wiring the Amazon 'Login with Amazon' endpoints."""
    provider_id = AmazonProvider.id
    access_token_url = 'https://api.amazon.com/auth/o2/token'
    authorize_url = 'http://www.amazon.com/ap/oa'
    profile_url = 'https://www.amazon.com/ap/user/profile'
    supports_state = False
    redirect_uri_protocol = 'https'

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the Amazon profile for *token* and build the social login."""
        payload = requests.get(self.profile_url,
                               params={'access_token': token}).json()
        # Amazon may wrap the fields in a 'Profile' envelope; flatten it to
        # the keys the provider expects.
        if 'Profile' in payload:
            profile = payload['Profile']
            payload = {
                'user_id': profile['CustomerId'],
                'name': profile['Name'],
                'email': profile['PrimaryEmail'],
            }
        return self.get_provider().sociallogin_from_response(request, payload)
# Concrete login/callback view callables bound to the Amazon adapter.
oauth2_login = OAuth2LoginView.adapter_view(AmazonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(AmazonOAuth2Adapter)
| mit |
ShineFan/odoo | addons/account/account_move_line.py | 109 | 78511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
    def _query_get(self, cr, uid, obj='l', context=None):
        """
        Build the SQL WHERE fragment (returned already mogrify'ed by the
        cursor) that restricts journal items according to the filters carried
        in ``context``: fiscal years, periods, date range, move state,
        journals, chart of accounts and company, plus the 'initial_bal'
        mode used by the opening-balance computations.

        :param obj: SQL alias of the account_move_line table in the caller's
                    query (defaults to 'l').
        """
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        fiscalperiod_obj = self.pool.get('account.period')
        account_obj = self.pool.get('account.account')
        fiscalyear_ids = []
        context = dict(context or {})
        initial_bal = context.get('initial_bal', False)
        company_clause = " "
        query = ''
        # All filter values are collected here and bound by mogrify below,
        # never concatenated into the SQL text.
        query_params = {}
        if context.get('company_id'):
            company_clause = " AND " +obj+".company_id = %(company_id)s"
            query_params['company_id'] = context['company_id']
        if not context.get('fiscalyear'):
            if context.get('all_fiscalyear'):
                #this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
                fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
            else:
                fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
        else:
            #for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
            fiscalyear_ids = context['fiscalyear']
            if isinstance(context['fiscalyear'], (int, long)):
                fiscalyear_ids = [fiscalyear_ids]
        # (0,) keeps the "IN %(fiscalyear_ids)s" clause valid when no
        # fiscal year matched.
        query_params['fiscalyear_ids'] = tuple(fiscalyear_ids) or (0,)
        state = context.get('state', False)
        where_move_state = ''
        where_move_lines_by_date = ''
        if context.get('date_from') and context.get('date_to'):
            query_params['date_from'] = context['date_from']
            query_params['date_to'] = context['date_to']
            # In initial-balance mode only entries strictly before date_from
            # count; otherwise the [date_from, date_to] window applies.
            if initial_bal:
                where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < %(date_from)s)"
            else:
                where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= %(date_from)s AND date <= %(date_to)s)"
        if state:
            if state.lower() not in ['all']:
                query_params['state'] = state
                where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = %(state)s)"
        if context.get('period_from') and context.get('period_to') and not context.get('periods'):
            # Expand a period_from/period_to pair into the explicit list of
            # period ids; in initial-balance mode the range starts at the
            # company's very first period instead.
            if initial_bal:
                period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
                first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
                context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
            else:
                context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
        if context.get('periods'):
            query_params['period_ids'] = tuple(context['periods'])
            if initial_bal:
                query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s)" + where_move_state + where_move_lines_by_date
                period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
                if period_ids and period_ids[0]:
                    # Initial balance: keep periods that start before the
                    # earliest selected one and are not themselves selected.
                    first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
                    query_params['date_start'] = first_period.date_start
                    query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s AND date_start <= %(date_start)s AND id NOT IN %(period_ids)s)" + where_move_state + where_move_lines_by_date
            else:
                query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s AND id IN %(period_ids)s)" + where_move_state + where_move_lines_by_date
        else:
            query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s)" + where_move_state + where_move_lines_by_date
        if initial_bal and not context.get('periods') and not where_move_lines_by_date:
            #we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
            #so we have to invalidate this query
            raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
        if context.get('journal_ids'):
            query_params['journal_ids'] = tuple(context['journal_ids'])
            query += ' AND '+obj+'.journal_id IN %(journal_ids)s'
        if context.get('chart_account_id'):
            # Restrict to the chart's whole account subtree (incl.
            # consolidation children).
            child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
            query_params['child_ids'] = tuple(child_ids)
            query += ' AND '+obj+'.account_id IN %(child_ids)s'
        query += company_clause
        return cr.mogrify(query, query_params)
    def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
        """
        This function returns the residual amount on a receivable or payable account.move.line.
        By default, it returns an amount in the currency of this journal entry (maybe different
        of the company currency), but if you pass 'residual_in_company_currency' = True in the
        context then the returned amount will be in company currency.

        NOTE(review): the 'residual_in_company_currency' flag mentioned above
        is never read in this body; both amounts are always computed and
        returned -- confirm the docstring against the callers.
        """
        res = {}
        if context is None:
            context = {}
        cur_obj = self.pool.get('res.currency')
        for move_line in self.browse(cr, uid, ids, context=context):
            # Default: nothing left to pay/receive.
            res[move_line.id] = {
                'amount_residual': 0.0,
                'amount_residual_currency': 0.0,
            }
            # Fully reconciled lines have no residual.
            if move_line.reconcile_id:
                continue
            if not move_line.account_id.reconcile:
                #this function does not support being used on move lines not related to a reconcilable account
                continue
            # Start from the line's own amount, in its own currency when it
            # has one, otherwise in company currency (debit - credit).
            if move_line.currency_id:
                move_line_total = move_line.amount_currency
                sign = move_line.amount_currency < 0 and -1 or 1
            else:
                move_line_total = move_line.debit - move_line.credit
                sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
            line_total_in_company_currency = move_line.debit - move_line.credit
            context_unreconciled = context.copy()
            # Subtract (by summing signed amounts) every partial payment
            # already matched against this line.
            if move_line.reconcile_partial_id:
                for payment_line in move_line.reconcile_partial_id.line_partial_ids:
                    if payment_line.id == move_line.id:
                        continue
                    if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
                        move_line_total += payment_line.amount_currency
                    else:
                        if move_line.currency_id:
                            # Convert the payment into the line's currency at
                            # the payment's date.
                            context_unreconciled.update({'date': payment_line.date})
                            amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
                            move_line_total += amount_in_foreign_currency
                        else:
                            move_line_total += (payment_line.debit - payment_line.credit)
                    line_total_in_company_currency += (payment_line.debit - payment_line.credit)
            result = move_line_total
            # Residuals are reported as positive values via ``sign``.
            res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
            res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
        return res
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
    def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
        """
        Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
        an analytic account. This method is intended to be extended in other modules.

        :param obj_line: browse record of the account.move.line that triggered the analytic line creation
        :return: dict of values for account.analytic.line.create()
        """
        # The analytic amount is signed opposite to the line's balance:
        # credit - debit.
        return {'name': obj_line.name,
                'date': obj_line.date,
                'account_id': obj_line.analytic_account_id.id,
                'unit_amount': obj_line.quantity,
                'product_id': obj_line.product_id and obj_line.product_id.id or False,
                'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
                'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
                'general_account_id': obj_line.account_id.id,
                'journal_id': obj_line.journal_id.analytic_journal_id.id,
                'ref': obj_line.ref,
                'move_id': obj_line.id,
                # NOTE(review): falls back to uid; when the line has no
                # invoice this relies on the empty record's user id being
                # falsy -- confirm it cannot raise instead.
                'user_id': obj_line.invoice.user_id.id or uid,
               }
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
'''Called in the end of default_get method for manual entry in account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
    def _default_get(self, cr, uid, fields, context=None):
        """
        Compute the defaults for a manually-encoded journal item.

        Beyond the regular model defaults this proposes:
        - the debit/credit amount balancing the move being edited (taken
          from the one2many passed in the context, or from the last
          unbalanced draft move of the journal/period),
        - the journal's default debit or credit account accordingly, mapped
          through the partner's fiscal position,
        - name/partner/ref carried over from the move's existing lines, and
          the amount in the account's secondary currency when it has one.
        """
        #default_get should only do the following:
        #   -propose the next amount in debit/credit in order to balance the move
        #   -propose the next account from the journal (default debit/credit account) accordingly
        context = dict(context or {})
        account_obj = self.pool.get('account.account')
        period_obj = self.pool.get('account.period')
        journal_obj = self.pool.get('account.journal')
        move_obj = self.pool.get('account.move')
        tax_obj = self.pool.get('account.tax')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        partner_obj = self.pool.get('res.partner')
        currency_obj = self.pool.get('res.currency')

        # Fall back on the search filters when journal/period were not given
        # explicitly, then normalize a period name into an id.
        if not context.get('journal_id', False):
            context['journal_id'] = context.get('search_default_journal_id', False)
        if not context.get('period_id', False):
            context['period_id'] = context.get('search_default_period_id', False)
        context = self.convert_to_period(cr, uid, context)

        # Compute simple values
        data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
        if context.get('journal_id'):
            total = 0.0
            #in account.move form view, it is not possible to compute total debit and credit using
            #a browse record. So we must use the context to pass the whole one2many field and compute the total
            if context.get('line_id'):
                for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
                    data['name'] = data.get('name') or move_line_dict.get('name')
                    data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
                    total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
            elif context.get('period_id'):
                #find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
                move_id = False
                cr.execute('''SELECT move_id, date FROM account_move_line
                    WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
                    ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
                res = cr.fetchone()
                move_id = res and res[0] or False
                data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
                data['move_id'] = move_id
                if move_id:
                    #if there exist some unbalanced accounting entries that match the journal and the period,
                    #we propose to continue the same move by copying the ref, the name, the partner...
                    move = move_obj.browse(cr, uid, move_id, context=context)
                    data.setdefault('name', move.line_id[-1].name)
                    for l in move.line_id:
                        data['partner_id'] = data.get('partner_id') or l.partner_id.id
                        data['ref'] = data.get('ref') or l.ref
                        total += (l.debit or 0.0) - (l.credit or 0.0)

            #compute the total of current move
            data['debit'] = total < 0 and -total or 0.0
            data['credit'] = total > 0 and total or 0.0
            #pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
            journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
            account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
            #map the account using the fiscal position of the partner, if needed
            # partner_id may arrive either as an id or as a (id, name) pair.
            if isinstance(data.get('partner_id'), (int, long)):
                part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
            elif isinstance(data.get('partner_id'), (tuple, list)):
                part = partner_obj.browse(cr, uid, data['partner_id'][0], context=context)
            else:
                part = False
            if account and part:
                # map_account returns an id, hence the re-browse.
                account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
                account = account_obj.browse(cr, uid, account, context=context)
            data['account_id'] = account and account.id or False

            #compute the amount in secondary currency of the account, if needed
            if account and account.currency_id:
                data['currency_id'] = account.currency_id.id
                #set the context for the multi currency change
                compute_ctx = context.copy()
                compute_ctx.update({
                    #the following 2 parameters are used to choose the currency rate, in case where the account
                    #doesn't work with an outgoing currency rate method 'at date' but 'average'
                    'res.currency.compute.account': account,
                    'res.currency.compute.account_invert': True,
                })
                if data.get('date'):
                    compute_ctx.update({'date': data['date']})
                data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
        data = self._default_get_move_form_hook(cr, uid, data)
        return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
domain = (context or {}).get('on_write_domain', [])
return self.pool.get('account.move.line').search(cr, uid, domain + [['id', 'in', [l.id for l in ml.move_id.line_id]]], context=context)
    def _balance(self, cr, uid, ids, name, arg, context=None):
        """
        Function field: for each line, the cumulative balance
        (SUM(debit - credit)) of all lines on the same account with an id
        lower than or equal to the line's own, restricted by _query_get's
        context filters.
        """
        if context is None:
            context = {}
        c = context.copy()
        # NOTE(review): 'initital_bal' looks like a typo for 'initial_bal',
        # the key _query_get actually reads, which makes this flag a no-op
        # -- confirm before fixing, since correcting it changes the
        # generated query.
        c['initital_bal'] = True
        sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
                    FROM account_move_line l1 LEFT JOIN account_move_line l2
                    ON (l1.account_id = l2.account_id
                    AND l2.id <= l1.id
                    AND """ + \
                self._query_get(cr, uid, obj='l2', context=c) + \
                ") WHERE l1.id IN %s GROUP BY l1.id"
        cr.execute(sql, [tuple(ids)])
        return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = invoice_id and (invoice_id, invoice_names[invoice_id]) or False
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
    def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
        """
        fnct_search companion of the 'balance' function field: translate a
        search on the balance into a domain on line ids.
        """
        if context is None:
            context = {}
        if not args:
            return []
        # NOTE(review): the operator (x[1]) and value (str(x[2])) are
        # concatenated directly into the SQL text rather than bound as
        # parameters -- safe only as long as args come from trusted domain
        # tuples; confirm and consider parameterizing.
        where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
        cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
                GROUP BY id, debit, credit having '+where)
        res = cursor.fetchall()
        # No match: return a domain that selects nothing.
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', [x[0] for x in res])]
    def _invoice_search(self, cursor, user, obj, name, args, context=None):
        """
        fnct_search companion of the 'invoice' function field: rewrite the
        search domain on 'invoice' (possibly dotted, e.g. 'invoice.number')
        into conditions on invoice ids, resolve them via SQL on the shared
        move, and return a domain on move line ids.
        """
        if not args:
            return []
        invoice_obj = self.pool.get('account.invoice')
        # First pass: normalize each leaf in place into an ('in', [ids])
        # form where possible (dotted sub-field searches and name searches).
        i = 0
        while i < len(args):
            fargs = args[i][0].split('.', 1)
            if len(fargs) > 1:
                args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
                    [(fargs[1], args[i][1], args[i][2])]))
                i += 1
                continue
            if isinstance(args[i][2], basestring):
                res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
                    args[i][1])
                args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
            i += 1
        # Second pass: build the SQL conditions (qu1) and their bound
        # parameters (qu2).
        qu1, qu2 = [], []
        for x in args:
            if x[1] != 'in':
                # = / != against False test for (non-)existence of a linked
                # invoice.
                if (x[2] is False) and (x[1] == '='):
                    qu1.append('(i.id IS NULL)')
                elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
                    qu1.append('(i.id IS NOT NULL)')
                else:
                    qu1.append('(i.id %s %s)' % (x[1], '%s'))
                    qu2.append(x[2])
            elif x[1] == 'in':
                if len(x[2]) > 0:
                    qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
                    qu2 += x[2]
                else:
                    # Empty id list can never match.
                    qu1.append(' (False)')
        if qu1:
            qu1 = ' AND' + ' AND'.join(qu1)
        else:
            qu1 = ''
        cursor.execute('SELECT l.id ' \
                'FROM account_move_line l, account_invoice i ' \
                'WHERE l.move_id = i.move_id ' + qu1, qu2)
        res = cursor.fetchall()
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('move_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True, auto_join=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
context or {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
    def _get_journal(self, cr, uid, context=None):
        """Default journal for a new move line.

        Priority: an explicit ``journal_id`` in context, else the first
        journal whose type matches the context's ``journal_type``. When no
        journal of the requested type exists, raises a RedirectWarning that
        sends the user to the journal configuration screen.
        """
        context = context or {}
        if context.get('journal_id', False):
            return context['journal_id']
        journal_id = False
        journal_pool = self.pool.get('account.journal')
        if context.get('journal_type', False):
            jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
            if not jids:
                # No journal of that type: point the user at the journal form action.
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
                msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            journal_id = jids[0]
        return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
    def _auto_init(self, cr, context=None):
        # On top of the standard ORM schema initialization, create two
        # composite indexes (if missing) used by the heavy move-line queries.
        res = super(account_move_line, self)._auto_init(cr, context=context)
        # (journal_id, period_id, state, create_uid, id DESC) index.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index '
                       'ON account_move_line (journal_id, period_id, state, create_uid, id DESC)')
        # (date DESC, id DESC) index matching the model's default _order.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
        return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
    def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
        """On partner change, propose a maturity date (from the partner's
        payment term) and, when no account is set yet, a payable/receivable
        account mapped through the partner's fiscal position."""
        partner_obj = self.pool.get('res.partner')
        payment_term_obj = self.pool.get('account.payment.term')
        journal_obj = self.pool.get('account.journal')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        val = {}
        val['date_maturity'] = False
        if not partner_id:
            return {'value':val}
        if not date:
            date = datetime.now().strftime('%Y-%m-%d')
        jt = False
        if journal:
            jt = journal_obj.browse(cr, uid, journal, context=context).type
        part = partner_obj.browse(cr, uid, partner_id, context=context)
        # Supplier payment term on purchase journals, customer term otherwise.
        payment_term_id = False
        if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
            payment_term_id = part.property_supplier_payment_term.id
        elif jt and part.property_payment_term:
            payment_term_id = part.property_payment_term.id
        if payment_term_id:
            # compute() returns [(date, amount), ...]; only the first due date matters here.
            res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
            if res:
                val['date_maturity'] = res[0][0]
        if not account_id:
            id1 = part.property_account_payable.id
            id2 = part.property_account_receivable.id
            if jt:
                # Receivable for sales, payable for purchases; on generic
                # journals, decide from the partner's customer/supplier flags.
                if jt in ('sale', 'purchase_refund'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
                elif jt in ('purchase', 'sale_refund'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
                elif jt in ('general', 'bank', 'cash'):
                    if part.customer:
                        val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
                    elif part.supplier:
                        val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
                if val.get('account_id', False):
                    # Chain the account onchange so the default tax follows.
                    d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
                    val.update(d['value'])
        return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
    #
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
    #
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Standard search, extended with two context-driven filters:
        ``fiscalyear`` restricts to the given fiscal year, and
        ``next_partner_only`` restricts to a single partner to reconcile.
        """
        if context is None:
            context = {}
        if context.get('fiscalyear'):
            args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
        if context and context.get('next_partner_only', False):
            if not context.get('partner_id', False):
                # Pick the first partner that still has entries to reconcile.
                partner = self.list_partners_to_reconcile(cr, uid, context=context)
                if partner:
                    partner = partner[0]
            else:
                partner = context.get('partner_id', False)
            if not partner:
                return []
            # NOTE(review): `partner` is indexed, so it is assumed to be a
            # name_get-style (id, name) pair in both branches — verify that
            # callers always pass context['partner_id'] in that shape.
            args.append(('partner_id', '=', partner[0]))
        return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
    def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
        """ Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: currency you want the move line debit/credit converted into
            :param target_date: date to use for the monetary conversion
        """
        if not lines:
            return []
        if context is None:
            context = {}
        ctx = context.copy()
        currency_obj = self.pool.get('res.currency')
        company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
        rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
        ret = []
        for line in lines:
            # Other lines sharing the same partial reconciliation (excluding this one).
            partial_reconciliation_siblings_ids = []
            if line.reconcile_partial_id:
                partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
                partial_reconciliation_siblings_ids.remove(line.id)
            ret_line = {
                'id': line.id,
                'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
                'ref': line.move_id.ref or '',
                'account_code': line.account_id.code,
                'account_name': line.account_id.name,
                'account_type': line.account_id.type,
                'date_maturity': line.date_maturity,
                'date': line.date,
                'period_name': line.period_id.name,
                'journal_name': line.journal_id.name,
                'partner_id': line.partner_id.id,
                'partner_name': line.partner_id.name,
                'is_partially_reconciled': bool(line.reconcile_partial_id),
                'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
            }
            # Amount residual can be negative
            debit = line.debit
            credit = line.credit
            amount = line.amount_residual
            amount_currency = line.amount_residual_currency
            if line.amount_residual < 0:
                # Normalize the sign: swap sides so amounts stay positive.
                debit, credit = credit, debit
                amount = -amount
                amount_currency = -amount_currency
            # Get right debit / credit:
            target_currency = target_currency or company_currency
            line_currency = line.currency_id or company_currency
            amount_currency_str = ""
            total_amount_currency_str = ""
            if line_currency != company_currency:
                # The line has a secondary currency: work on the currency amounts.
                total_amount = line.amount_currency
                actual_debit = debit > 0 and amount_currency or 0.0
                actual_credit = credit > 0 and amount_currency or 0.0
            else:
                total_amount = abs(debit - credit)
                actual_debit = debit > 0 and amount or 0.0
                actual_credit = credit > 0 and amount or 0.0
            if line_currency != target_currency:
                # Keep the original-currency amounts as formatted strings,
                # then convert the numeric amounts into the target currency.
                amount_currency_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=line_currency)
                total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
                ret_line['credit_currency'] = actual_credit
                ret_line['debit_currency'] = actual_debit
                if target_currency == company_currency:
                    actual_debit = debit
                    actual_credit = credit
                    total_amount = debit or credit
                else:
                    ctx = context.copy()
                    ctx.update({'date': line.date})
                    total_amount = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, total_amount, context=ctx)
                    actual_debit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_debit, context=ctx)
                    actual_credit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_credit, context=ctx)
            amount_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=target_currency)
            total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
            ret_line['debit'] = actual_debit
            ret_line['credit'] = actual_credit
            ret_line['amount_str'] = amount_str
            ret_line['total_amount_str'] = total_amount_str
            ret_line['amount_currency_str'] = amount_currency_str
            ret_line['total_amount_currency_str'] = total_amount_currency_str
            ret.append(ret_line)
        return ret
    def list_partners_to_reconcile(self, cr, uid, context=None, filter_domain=False):
        """Return name_get pairs of partners that still have entries to
        reconcile: partners with both unreconciled debit and credit on
        reconcilable accounts, whose last move postdates their last
        reconciliation. Ordered by oldest reconciliation first.
        """
        line_ids = []
        if filter_domain:
            line_ids = self.search(cr, uid, filter_domain, context=context)
        # where_clause is a fixed literal; the %s it may contain is filled by
        # psycopg2 from the (line_ids,) parameter tuple, so no SQL injection.
        where_clause = filter_domain and "AND l.id = ANY(%s)" or ""
        cr.execute(
             """SELECT partner_id FROM (
                SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
                FROM account_move_line l
                RIGHT JOIN account_account a ON (a.id = l.account_id)
                RIGHT JOIN res_partner p ON (l.partner_id = p.id)
                    WHERE a.reconcile IS TRUE
                    AND l.reconcile_id IS NULL
                    AND l.state <> 'draft'
                    %s
                GROUP BY l.partner_id, p.last_reconciliation_date
                ) AS s
                WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
                ORDER BY last_reconciliation_date"""
                % where_clause, (line_ids,))
        ids = [x[0] for x in cr.fetchall()]
        if not ids:
            return []
        # To apply the ir_rules
        partner_obj = self.pool.get('res.partner')
        ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
        return partner_obj.name_get(cr, uid, ids, context=context)
    def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
        """Partially reconcile the given move lines, merging them with any
        partial reconciliations they already belong to. If the merged set
        balances to zero, a full reconciliation is performed instead.
        Returns the account.move.reconcile id (or reconcile()'s result).
        """
        move_rec_obj = self.pool.get('account.move.reconcile')
        merges = []
        unmerge = []
        total = 0.0
        merges_rec = []
        company_list = []
        if context is None:
            context = {}
        # All lines must belong to the same company.
        for line in self.browse(cr, uid, ids, context=context):
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
            company_list.append(line.company_id.id)
        for line in self.browse(cr, uid, ids, context=context):
            # Currency used to decide whether the merged set balances out.
            if line.account_id.currency_id:
                currency_id = line.account_id.currency_id
            else:
                currency_id = line.company_id.currency_id
            if line.reconcile_id:
                raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
            if line.reconcile_partial_id:
                # Pull in every sibling of the existing partial reconciliation.
                for line2 in line.reconcile_partial_id.line_partial_ids:
                    if line2.state != 'valid':
                        raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
                    if not line2.reconcile_id:
                        if line2.id not in merges:
                            merges.append(line2.id)
                        if line2.account_id.currency_id:
                            total += line2.amount_currency
                        else:
                            total += (line2.debit or 0.0) - (line2.credit or 0.0)
                merges_rec.append(line.reconcile_partial_id.id)
            else:
                unmerge.append(line.id)
                if line.account_id.currency_id:
                    total += line.amount_currency
                else:
                    total += (line.debit or 0.0) - (line.credit or 0.0)
        if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
            # Balanced: upgrade to a full reconciliation.
            res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
            return res
        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
        }, context=reconcile_context)
        move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
        return r_id
    def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
        """Fully reconcile the given move lines.

        All lines must target the same reconcilable account and the same
        company. When debits and credits (or currency amounts) do not
        balance, a write-off move is generated on ``writeoff_acc_id``
        (mandatory in that case) in ``writeoff_journal_id``/``writeoff_period_id``.
        Returns the id of the created account.move.reconcile record.
        """
        account_obj = self.pool.get('account.account')
        move_obj = self.pool.get('account.move')
        move_rec_obj = self.pool.get('account.move.reconcile')
        partner_obj = self.pool.get('res.partner')
        currency_obj = self.pool.get('res.currency')
        lines = self.browse(cr, uid, ids, context=context)
        unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
        credit = debit = 0.0
        currency = 0.0
        account_id = False
        partner_id = False
        if context is None:
            context = {}
        # All lines must belong to the same company.
        company_list = []
        for line in lines:
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
            company_list.append(line.company_id.id)
        # Accumulate totals over the not-yet-reconciled lines.
        for line in unrec_lines:
            if line.state <> 'valid':
                raise osv.except_osv(_('Error!'),
                        _('Entry "%s" is not valid !') % line.name)
            credit += line['credit']
            debit += line['debit']
            currency += line['amount_currency'] or 0.0
            account_id = line['account_id']['id']
            partner_id = (line['partner_id'] and line['partner_id']['id']) or False
        writeoff = debit - credit
        # If date_p is given in context, use it for the write-off move.
        if context.has_key('date_p') and context['date_p']:
            date=context['date_p']
        else:
            date = time.strftime('%Y-%m-%d')
        cr.execute('SELECT account_id, reconcile_id '\
                   'FROM account_move_line '\
                   'WHERE id IN %s '\
                   'GROUP BY account_id,reconcile_id',
                   (tuple(ids), ))
        r = cr.fetchall()
        #TODO: move this check to a constraint in the account_move_reconcile object
        if len(r) != 1:
            raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
        if not unrec_lines:
            raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
        account = account_obj.browse(cr, uid, account_id, context=context)
        if not account.reconcile:
            raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
        if r[0][1] != None:
            raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
        # Unbalanced (in company currency or in the account's secondary
        # currency): a write-off entry is required.
        if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
           (account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
            if not writeoff_acc_id:
                raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
            if writeoff > 0:
                debit = writeoff
                credit = 0.0
                self_credit = writeoff
                self_debit = 0.0
            else:
                debit = 0.0
                credit = -writeoff
                self_credit = 0.0
                self_debit = -writeoff
            # If a comment is given in context, use it as the write-off label.
            if 'comment' in context and context['comment']:
                libelle = context['comment']
            else:
                libelle = _('Write-Off')
            cur_obj = self.pool.get('res.currency')
            cur_id = False
            amount_currency_writeoff = 0.0
            if context.get('company_currency_id',False) != context.get('currency_id',False):
                cur_id = context.get('currency_id',False)
                for line in unrec_lines:
                    if line.currency_id and line.currency_id.id == context.get('currency_id',False):
                        amount_currency_writeoff += line.amount_currency
                    else:
                        tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
                        amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
            # Two balancing lines: one on the reconciled account, one on the
            # write-off account.
            writeoff_lines = [
                (0, 0, {
                    'name': libelle,
                    'debit': self_debit,
                    'credit': self_credit,
                    'account_id': account_id,
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
                }),
                (0, 0, {
                    'name': libelle,
                    'debit': debit,
                    'credit': credit,
                    'account_id': writeoff_acc_id,
                    'analytic_account_id': context.get('analytic_id', False),
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
                })
            ]
            writeoff_move_id = move_obj.create(cr, uid, {
                'period_id': writeoff_period_id,
                'journal_id': writeoff_journal_id,
                'date':date,
                'state': 'draft',
                'line_id': writeoff_lines
            })
            # Include the write-off counterpart line(s) in the reconciliation.
            writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
            if account_id == writeoff_acc_id:
                writeoff_line_ids = [writeoff_line_ids[1]]
            ids += writeoff_line_ids
        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_id': map(lambda x: (4, x, False), ids),
            'line_partial_ids': map(lambda x: (3, x, False), ids)
        }, context=reconcile_context)
        # the id of the move.reconcile is written in the move.line (self) by the create method above
        # because of the way the line_id are defined: (4, x, False)
        for id in ids:
            workflow.trg_trigger(uid, 'account.move.line', id, cr)
        if lines and lines[0]:
            partner_id = lines[0].partner_id and lines[0].partner_id.id or False
            if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
                partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
        return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
    def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the full/partial reconciliation records attached to the
        # given move lines, then re-run a partial reconciliation on the
        # remaining lines that shared those reconciliations.
        obj_move_line = self.pool.get('account.move.line')
        obj_move_rec = self.pool.get('account.move.reconcile')
        unlink_ids = []
        if not move_ids:
            return True
        recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
        full_recs = filter(lambda x: x['reconcile_id'], recs)
        rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
        part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
        part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
        unlink_ids += rec_ids
        unlink_ids += part_rec_ids
        # Other lines pointing to the same reconciliations, besides move_ids.
        all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
        all_moves = list(set(all_moves) - set(move_ids))
        if unlink_ids:
            if opening_reconciliation:
                raise osv.except_osv(_('Warning!'),
                    _('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
                # NOTE(review): unreachable — the raise above always fires
                # when opening_reconciliation is set; kept as-is.
                obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
            obj_move_rec.unlink(cr, uid, unlink_ids)
            if len(all_moves) >= 2:
                # Re-link the leftover lines through a fresh partial reconciliation.
                obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
        return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
localcontext = dict(context)
localcontext['journal_id'] = line.journal_id.id
localcontext['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=localcontext)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
    def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
        """Write on move lines with accounting safeguards: taxes cannot be
        changed in place, inactive accounts are refused, posted/reconciled
        lines are protected, and the impacted moves are revalidated when a
        balance-affecting field changes. A 'date' value is deferred and
        written on the parent move after validation."""
        if context is None:
            context={}
        move_obj = self.pool.get('account.move')
        account_obj = self.pool.get('account.account')
        journal_obj = self.pool.get('account.journal')
        if isinstance(ids, (int, long)):
            ids = [ids]
        if vals.get('account_tax_id', False):
            raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
        if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
            raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
        # Does this write touch a field that changes the move's balance/placement?
        affects_move = any(f in vals for f in ('account_id', 'journal_id', 'period_id', 'move_id', 'debit', 'credit', 'date'))
        if update_check and affects_move:
            self._update_check(cr, uid, ids, context)
        # The date is written on the parent move after validation, not on the line.
        todo_date = None
        if vals.get('date', False):
            todo_date = vals['date']
            del vals['date']
        for line in self.browse(cr, uid, ids, context=context):
            ctx = context.copy()
            if not ctx.get('journal_id'):
                if line.move_id:
                    ctx['journal_id'] = line.move_id.journal_id.id
                else:
                    ctx['journal_id'] = line.journal_id.id
            if not ctx.get('period_id'):
                if line.move_id:
                    ctx['period_id'] = line.move_id.period_id.id
                else:
                    ctx['period_id'] = line.period_id.id
            #Check for centralisation
            journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
            if journal.centralisation:
                self._check_moves(cr, uid, context=ctx)
        result = super(account_move_line, self).write(cr, uid, ids, vals, context)
        if affects_move and check and not context.get('novalidate'):
            # Revalidate each impacted move once.
            done = []
            for line in self.browse(cr, uid, ids):
                if line.move_id.id not in done:
                    done.append(line.move_id.id)
                    move_obj.validate(cr, uid, [line.move_id.id], context)
                    if todo_date:
                        move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
        return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
raise osv.except_osv(_('Error!'), _('You can not add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
base_adjusted = False
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
if base_adjusted == False:
base_adjusted = True
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
self.create(cr, uid, data, context)
#create the Tax movement
if not tax['amount'] and not tax[tax_code]:
continue
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
self.create(cr, uid, data, context)
del vals['account_tax_id']
recompute = journal.env.recompute and context.get('recompute', True)
if check and not context.get('novalidate') and (recompute or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
    """Return (id, display name) pairs for every account period."""
    period_obj = self.pool.get('account.period')
    period_ids = period_obj.search(cr, uid, [])
    return period_obj.name_get(cr, uid, period_ids, context=context)
def list_journals(self, cr, uid, context=None):
    """Return (id, name, type, has_currency, has_analytic_journal) tuples
    for every account journal."""
    journal_obj = self.pool.get('account.journal')
    names = dict(journal_obj.name_search(cr, uid, '', []))
    return [(journal.id, names[journal.id], journal.type,
             bool(journal.currency), bool(journal.analytic_journal_id))
            for journal in journal_obj.browse(cr, uid, list(names),
                                              context=context)]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ltilve/chromium | tools/telemetry/telemetry/core/android_app_unittest.py | 3 | 2050 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import unittest
from pylib.device import intent
from telemetry.core import android_app
from telemetry.core.backends import android_app_backend
from telemetry.core import platform as platform_module
from telemetry.core.platform import android_device
from telemetry.unittest_util import options_for_unittests
class AndroidAppTest(unittest.TestCase):
    """Device-dependent smoke test for telemetry's Android app driving API.

    Requires an attached Android device; when none is found the test body
    returns early instead of failing.
    """

    def setUp(self):
        self._options = options_for_unittests.GetCopy()
        # None when no Android device is available; test methods guard on this.
        self._device = android_device.GetDevice(self._options)

    def CreateAndroidApp(self, start_intent):
        """Return an AndroidApp that will launch *start_intent* on the device."""
        platform = platform_module.GetPlatformForDevice(self._device, self._options)
        # NOTE(review): reaches into a private member of the platform wrapper;
        # there appears to be no public accessor for the backend here.
        platform_backend = platform._platform_backend
        app_backend = android_app_backend.AndroidAppBackend(
            platform_backend, start_intent)
        return android_app.AndroidApp(app_backend, platform_backend)

    def testWebView(self):
        """Launches the quick-search app and navigates its ':search' webview."""
        if self._device is None:
            logging.warning('No device found, skipping test.')
            return
        start_intent = intent.Intent(
            package='com.google.android.googlequicksearchbox',
            activity='.SearchActivity',
            action='com.google.android.googlequicksearchbox.GOOGLE_SEARCH',
            data=None,
            extras={'query': 'google'},
            category=None)
        search_app = self.CreateAndroidApp(start_intent)
        search_process = search_app.GetProcess(':search')
        search_process._UpdateDevToolsClient()
        # TODO(ariblue): Replace the app used in this test with one in which the
        # setWebContentsDebuggingEnabled method is called on the WebView class.
        # This will configure webviews for debugging with chrome devtools inspector
        # and allow us to remove this check.
        if search_process._devtools_client is None:
            return
        webview = search_app.GetProcess(':search').GetWebViews().pop()
        webview.Navigate('https://www.google.com/search?q=flowers')
        time.sleep(5)
| bsd-3-clause |
bordeltabernacle/python_koans | python2/libs/colorama/ansitowin32.py | 287 | 6621 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
# winterm is only instantiated on Windows (windll is None elsewhere), so every
# use of it below must be guarded by a convert/platform check.
if windll is not None:
    winterm = WinTerm()
def is_a_tty(stream):
    """Return True when *stream* reports itself as an interactive terminal."""
    isatty = getattr(stream, 'isatty', None)
    return isatty is not None and isatty()
class StreamWrapper(object):
    '''
    Transparent proxy around a stream (e.g. stdout): every attribute is
    forwarded to the wrapped stream except 'write()', which is routed to the
    converter so ANSI sequences can be stripped/translated first.
    '''
    def __init__(self, wrapped, converter):
        # Name-mangled (double-underscore) attributes so we cannot collide
        # with attribute names on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter

    def __getattr__(self, name):
        wrapped = self.__wrapped
        return getattr(wrapped, name)

    def write(self, text):
        # Delegate writes to the converter, not the underlying stream.
        self.__convertor.write(text)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # Matches one CSI sequence: ESC [ <digits/semicolons> <command letter>.
    # Raw string so '\[' is not an (invalid) Python string escape; the regex
    # engine still interprets \033 as the ESC character.
    ANSI_RE = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')

    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped

        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset

        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)

        on_windows = sys.platform.startswith('win')

        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows
        self.strip = strip

        # should we convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and is_a_tty(wrapped)
        self.convert = convert

        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()

        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr

    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset

    def get_win32_calls(self):
        """Map ANSI codes to (win32 function, *args) tuples.

        Returns None when conversion is disabled or winterm is unavailable.
        """
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
            }

    def write(self, text):
        """Write *text*, stripping/converting ANSI sequences as configured."""
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()

    def reset_all(self):
        """Restore default colors/style on the output."""
        if self.convert:
            self.call_win32('m', (0,))
        elif is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)

    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        for match in self.ANSI_RE.finditer(text):
            start, end = match.span()
            self.write_plain_text(text, cursor, start)
            self.convert_ansi(*match.groups())
            cursor = end
        self.write_plain_text(text, cursor, len(text))

    def write_plain_text(self, text, start, end):
        """Emit text[start:end] unchanged (no-op for an empty span)."""
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()

    def convert_ansi(self, paramstring, command):
        """Translate one matched ANSI sequence into a win32 call, if enabled."""
        if self.convert:
            params = self.extract_params(paramstring)
            self.call_win32(command, params)

    def extract_params(self, paramstring):
        """Parse '3;14' into the tuple (3, 14); empty string gives ()."""
        def split(paramstring):
            for p in paramstring.split(';'):
                if p != '':
                    yield int(p)
        return tuple(split(paramstring))

    def call_win32(self, command, params):
        """Dispatch one parsed sequence to the equivalent win32 console call."""
        if command == 'm':
            # SGR with no parameters means 'reset' (ESC[m == ESC[0m).
            # BUG FIX: the previous code compared ``params == []``, which can
            # never match the tuple returned by extract_params(), so a bare
            # ESC[m was silently ignored; test truthiness instead.
            if not params:
                params = (0,)
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in ('H', 'f'):  # set cursor position
            func = winterm.set_cursor_position
            func(params, on_stderr=self.on_stderr)
        elif command == 'J':
            func = winterm.erase_data
            func(params, on_stderr=self.on_stderr)
        elif command == 'A':
            # Cursor-up defaults to one row when no parameter is given.
            # (params is always a tuple here, never None, so the old
            # ``params == None`` branch was dead.)
            if not params:
                num_rows = 1
            else:
                num_rows = params[0]
            func = winterm.cursor_up
            func(num_rows, on_stderr=self.on_stderr)
| mit |
Fusion-Rom/android_external_chromium_org | tools/telemetry/telemetry/page/page_runner.py | 25 | 22196 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import random
import sys
import tempfile
import time
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import browser_info
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
from telemetry.page import page_filter
from telemetry.page import page_test
from telemetry.page.actions import navigate
from telemetry.page.actions import page_action
from telemetry.results import results_options
from telemetry.util import cloud_storage
from telemetry.util import exception_formatter
from telemetry.value import failure
from telemetry.value import skip
class _RunState(object):
    """Mutable state shared across page runs: the live browser instance plus
    bookkeeping for Web Page Replay archives and profiler output."""

    def __init__(self):
        self.browser = None

        # After a browser restart during recording we must append to the
        # current WPR archive rather than overwrite it.
        self._append_to_existing_wpr = False
        # Archive path the replay server was last pointed at.
        self._last_archive_path = None
        # Used to emit OS/GPU system info only for the first browser launch.
        self._first_browser = True
        # Temp directory receiving profiler output files, created lazily.
        self.profiler_dir = None

    def StartBrowserIfNeeded(self, test, page_set, page, possible_browser,
                             credentials_path, archive_path, finder_options):
        """Launch a browser configured for *page*, or reuse the running one.

        Also re-points Web Page Replay when the page's archive changed, and
        for single-tab tests reduces the browser to exactly one tab.
        """
        started_browser = not self.browser
        # Create a browser.
        if not self.browser:
            test.CustomizeBrowserOptionsForSinglePage(page, finder_options)
            possible_browser.SetReplayArchivePath(
                archive_path,
                self._append_to_existing_wpr,
                page_set.make_javascript_deterministic)
            possible_browser.SetCredentialsPath(credentials_path)
            self._last_archive_path = page.archive_path
            test.WillStartBrowser(possible_browser.platform)
            self.browser = possible_browser.Create()
            test.DidStartBrowser(self.browser)

            if self._first_browser:
                self._first_browser = False
                self.browser.credentials.WarnIfMissingCredentials(page_set)
                logging.info('OS: %s %s',
                             self.browser.platform.GetOSName(),
                             self.browser.platform.GetOSVersionName())
                if self.browser.supports_system_info:
                    system_info = self.browser.GetSystemInfo()
                    if system_info.model_name:
                        logging.info('Model: %s', system_info.model_name)
                    if system_info.gpu:
                        for i, device in enumerate(system_info.gpu.devices):
                            logging.info('GPU device %d: %s', i, device)
                        if system_info.gpu.aux_attributes:
                            logging.info('GPU Attributes:')
                            for k, v in sorted(system_info.gpu.aux_attributes.iteritems()):
                                logging.info(' %-20s: %s', k, v)
                        if system_info.gpu.feature_status:
                            logging.info('Feature Status:')
                            for k, v in sorted(system_info.gpu.feature_status.iteritems()):
                                logging.info(' %-20s: %s', k, v)
                        if system_info.gpu.driver_bug_workarounds:
                            logging.info('Driver Bug Workarounds:')
                            for workaround in system_info.gpu.driver_bug_workarounds:
                                logging.info(' %s', workaround)
                    else:
                        logging.info('No GPU devices')
                else:
                    logging.warning('System info not supported')
        else:
            # Set up WPR path if it changed.
            if page.archive_path and self._last_archive_path != page.archive_path:
                self.browser.SetReplayArchivePath(
                    page.archive_path,
                    self._append_to_existing_wpr,
                    page_set.make_javascript_deterministic)
                self._last_archive_path = page.archive_path

        if self.browser.supports_tab_control and test.close_tabs_before_run:
            # Create a tab if there's none.
            if len(self.browser.tabs) == 0:
                self.browser.tabs.New()

            # Ensure only one tab is open, unless the test is a multi-tab test.
            if not test.is_multi_tab_test:
                while len(self.browser.tabs) > 1:
                    self.browser.tabs[-1].Close()

            # Must wait for tab to commit otherwise it can commit after the next
            # navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()
            # will cancel the next navigation because it's pending. This manifests as
            # the first navigation in a PageSet freezing indefinitly because the
            # navigation was silently cancelled when |self.browser.tabs[0]| was
            # committed. Only do this when we just started the browser, otherwise
            # there are cases where previous pages in a PageSet never complete
            # loading so we'll wait forever.
            if started_browser:
                self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()

    def StopBrowser(self):
        """Close the browser (if any) and prepare WPR for a later restart."""
        if self.browser:
            self.browser.Close()
            self.browser = None

            # Restarting the state will also restart the wpr server. If we're
            # recording, we need to continue adding into the same wpr archive,
            # not overwrite it.
            self._append_to_existing_wpr = True

    def StartProfiling(self, page, finder_options):
        """Start the configured profiler, writing output named after *page*."""
        if not self.profiler_dir:
            self.profiler_dir = tempfile.mkdtemp()
        output_file = os.path.join(self.profiler_dir, page.file_safe_name)
        is_repeating = (finder_options.page_repeat != 1 or
                        finder_options.pageset_repeat != 1)
        if is_repeating:
            # Avoid clobbering output from earlier repeats of the same page.
            output_file = util.GetSequentialFileName(output_file)
        self.browser.platform.profiling_controller.Start(
            finder_options.profiler, output_file)

    def StopProfiling(self):
        """Stop the profiler, if a browser is still alive."""
        if self.browser:
            self.browser.platform.profiling_controller.Stop()
class PageState(object):
    """Per-page run state: the page, the tab it runs in, and login status."""

    def __init__(self, page, tab):
        self.page = page
        self.tab = tab
        # True once we logged in with the page's credentials, so CleanUpPage
        # knows it must log back out.
        self._did_login = False

    def PreparePage(self, test=None):
        """Serve local files, perform any required login, optionally clear cache.

        Raises page_test.Failure when logging in with the page's credentials
        fails.
        """
        if self.page.is_file:
            self.tab.browser.SetHTTPServerDirectories(
                self.page.page_set.serving_dirs | set([self.page.serving_dir]))

        if self.page.credentials:
            if not self.tab.browser.credentials.LoginNeeded(
                    self.tab, self.page.credentials):
                raise page_test.Failure('Login as ' + self.page.credentials + ' failed')
            self._did_login = True

        if test:
            if test.clear_cache_before_each_run:
                self.tab.ClearCache(force=True)

    def ImplicitPageNavigation(self, test=None):
        """Executes the implicit navigation that occurs for every page iteration.

        This function will be called once per page before any actions are executed.
        """
        if test:
            test.WillNavigateToPage(self.page, self.tab)
            test.RunNavigateSteps(self.page, self.tab)
            test.DidNavigateToPage(self.page, self.tab)
        else:
            i = navigate.NavigateAction()
            i.RunAction(self.page, self.tab, None)

    def CleanUpPage(self, test):
        """Run the test's per-page cleanup and log out if PreparePage logged in."""
        test.CleanUpAfterPage(self.page, self.tab)
        if self.page.credentials and self._did_login:
            self.tab.browser.credentials.LoginNoLongerNeeded(
                self.tab, self.page.credentials)
def AddCommandLineArgs(parser):
    """Register the page runner's command line options on *parser*.

    Adds page-filter and results options, plus option groups for page set
    ordering/repeat and Web Page Replay control.
    """
    page_filter.PageFilter.AddCommandLineArgs(parser)
    results_options.AddResultsOptions(parser)

    # Page set options
    group = optparse.OptionGroup(parser, 'Page set ordering and repeat options')
    group.add_option('--pageset-shuffle', action='store_true',
        dest='pageset_shuffle',
        help='Shuffle the order of pages within a pageset.')
    group.add_option('--pageset-shuffle-order-file',
        dest='pageset_shuffle_order_file', default=None,
        help='Filename of an output of a previously run test on the current '
        'pageset. The tests will run in the same order again, overriding '
        'what is specified by --page-repeat and --pageset-repeat.')
    group.add_option('--page-repeat', default=1, type='int',
        help='Number of times to repeat each individual page '
        'before proceeding with the next page in the pageset.')
    group.add_option('--pageset-repeat', default=1, type='int',
        help='Number of times to repeat the entire pageset.')
    group.add_option('--max-failures', default=None, type='int',
        help='Maximum number of test failures before aborting '
        'the run. Defaults to the number specified by the '
        'PageTest.')
    parser.add_option_group(group)

    # WPR options
    group = optparse.OptionGroup(parser, 'Web Page Replay options')
    group.add_option('--use-live-sites',
        dest='use_live_sites', action='store_true',
        help='Run against live sites and ignore the Web Page Replay archives.')
    parser.add_option_group(group)

    parser.add_option('-d', '--also-run-disabled-tests',
        dest='run_disabled_tests',
        action='store_true', default=False,
        help='Ignore @Disabled and @Enabled restrictions.')
def ProcessCommandLineArgs(parser, args):
    """Validate the options registered by AddCommandLineArgs.

    Calls parser.error() (which normally exits) on any invalid combination.
    """
    page_filter.PageFilter.ProcessCommandLineArgs(parser, args)

    # Page set options
    if args.pageset_shuffle_order_file and not args.pageset_shuffle:
        parser.error('--pageset-shuffle-order-file requires --pageset-shuffle.')

    for flag, value in (('--page-repeat', args.page_repeat),
                        ('--pageset-repeat', args.pageset_repeat)):
        if value < 1:
            parser.error('%s must be a positive integer.' % flag)
def _PrepareAndRunPage(test, page_set, expectations, finder_options,
                       browser_options, page, credentials_path,
                       possible_browser, results, state):
    """Configure WPR for *page*, then run it, retrying up to test.attempts times.

    Tab crashes abort multi-tab tests; otherwise the browser is restarted and
    the attempt counts as done. Browser crashes retry until attempts run out.
    """
    if finder_options.use_live_sites:
        browser_options.wpr_mode = wpr_modes.WPR_OFF
    elif browser_options.wpr_mode != wpr_modes.WPR_RECORD:
        # Replay only when an archive file actually exists for this page.
        browser_options.wpr_mode = (
            wpr_modes.WPR_REPLAY
            if page.archive_path and os.path.isfile(page.archive_path)
            else wpr_modes.WPR_OFF)

    max_attempts = test.attempts
    attempt_num = 0
    while attempt_num < max_attempts:
        attempt_num += 1
        try:
            results.WillAttemptPageRun(attempt_num, max_attempts)

            if test.RestartBrowserBeforeEachPage() or page.startup_url:
                state.StopBrowser()
            # If we are restarting the browser for each page customize the per page
            # options for just the current page before starting the browser.
            state.StartBrowserIfNeeded(test, page_set, page, possible_browser,
                                       credentials_path, page.archive_path,
                                       finder_options)
            if not page.CanRunOnBrowser(browser_info.BrowserInfo(state.browser)):
                logging.info('Skip test for page %s because browser is not supported.'
                             % page.url)
                return

            expectation = expectations.GetExpectationForPage(state.browser, page)

            _WaitForThermalThrottlingIfNeeded(state.browser.platform)

            if finder_options.profiler:
                state.StartProfiling(page, finder_options)

            try:
                _RunPage(test, page, state, expectation, results)
                _CheckThermalThrottling(state.browser.platform)
            except exceptions.TabCrashException as e:
                if test.is_multi_tab_test:
                    logging.error('Aborting multi-tab test after tab %s crashed',
                                  page.url)
                    raise
                # Single-tab test: log, restart the browser and fall through.
                logging.warning(str(e))
                state.StopBrowser()

            if finder_options.profiler:
                state.StopProfiling()

            if (test.StopBrowserAfterPage(state.browser, page)):
                state.StopBrowser()

            return
        except exceptions.BrowserGoneException as e:
            state.StopBrowser()
            if attempt_num == max_attempts:
                logging.error('Aborting after too many retries')
                raise
            if test.is_multi_tab_test:
                logging.error('Aborting multi-tab test after browser crashed')
                raise
            logging.warning(str(e))
@decorators.Cache
def _UpdateCredentials(page_set):
    """Download the page set's credentials file from cloud storage, if any.

    Cloud storage problems are logged as warnings, never raised: missing
    credentials only disable login-dependent pages.
    """
    if not page_set.credentials_path:
        return
    credentials_file = os.path.join(page_set.base_dir, page_set.credentials_path)
    try:
        cloud_storage.GetIfChanged(credentials_file)
    except (cloud_storage.CredentialsError, cloud_storage.PermissionError,
            cloud_storage.CloudStorageError) as e:
        logging.warning('Cannot retrieve credential file %s due to cloud storage '
                        'error %s', page_set.credentials_path, str(e))
@decorators.Cache
def _UpdatePageSetArchivesIfChanged(page_set):
    """Download updated WPR archives referenced by .sha1 stub files.

    Scans every serving directory (assuming public cloud storage data) and
    fetches the blob behind each stub when it changed.
    """
    # Collect every directory that may contain .sha1 stubs: the page set's
    # serving dirs plus each file-based page's own directory.
    serving_dirs = page_set.serving_dirs.copy()
    for page in page_set:
        if page.is_file:
            serving_dirs.add(page.serving_dir)

    for serving_dir in serving_dirs:
        if os.path.splitdrive(serving_dir)[1] == '/':
            raise ValueError('Trying to serve root directory from HTTP server.')
        for dirpath, _, filenames in os.walk(serving_dir):
            for filename in filenames:
                path, extension = os.path.splitext(os.path.join(dirpath, filename))
                if extension == '.sha1':
                    # 'path' is the stub's name minus '.sha1' — the real file.
                    cloud_storage.GetIfChanged(path, page_set.bucket)
def Run(test, page_set, expectations, finder_options, results):
    """Runs a given test against a given page_set with the given options.

    Finds a browser, prepares credentials and WPR archives, filters/reorders
    pages, then runs each page page_repeat times for pageset_repeat rounds,
    stopping early when the failure budget is exhausted.
    """
    test.ValidatePageSet(page_set)

    # Create a possible_browser with the given options.
    try:
        possible_browser = browser_finder.FindBrowser(finder_options)
    except browser_finder.BrowserTypeRequiredException, e:
        sys.stderr.write(str(e) + '\n')
        sys.exit(-1)
    if not possible_browser:
        sys.stderr.write(
            'No browser found. Available browsers:\n%s\n' %
            '\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
        sys.exit(-1)

    browser_options = possible_browser.finder_options.browser_options
    browser_options.browser_type = possible_browser.browser_type
    test.CustomizeBrowserOptions(browser_options)

    if (not decorators.IsEnabled(test, possible_browser) and
            not finder_options.run_disabled_tests):
        logging.warning('You are trying to run a disabled test.')
        logging.warning('Pass --also-run-disabled-tests to squelch this message.')
        return

    if possible_browser.IsRemote():
        possible_browser.RunRemote()
        sys.exit(0)

    # Reorder page set based on options.
    pages = _ShuffleAndFilterPageSet(page_set, finder_options)

    if not finder_options.use_live_sites:
        _UpdateCredentials(page_set)
        if browser_options.wpr_mode != wpr_modes.WPR_RECORD:
            _UpdatePageSetArchivesIfChanged(page_set)
            # Drop pages whose archives are missing.
            pages = _CheckArchives(page_set, pages, results)

    # Verify credentials path.
    credentials_path = None
    if page_set.credentials_path:
        credentials_path = os.path.join(os.path.dirname(page_set.file_path),
                                        page_set.credentials_path)
        if not os.path.exists(credentials_path):
            credentials_path = None

    # Set up user agent.
    browser_options.browser_user_agent_type = page_set.user_agent_type or None

    if finder_options.profiler:
        profiler_class = profiler_finder.FindProfiler(finder_options.profiler)
        profiler_class.CustomizeBrowserOptions(browser_options.browser_type,
                                               finder_options)

    # Record skips for pages the test itself refuses to run.
    for page in list(pages):
        if not test.CanRunForPage(page):
            results.WillRunPage(page)
            logging.debug('Skipping test: it cannot run for %s', page.url)
            results.AddValue(skip.SkipValue(page, 'Test cannot run'))
            results.DidRunPage(page)
            pages.remove(page)

    if not pages:
        return

    state = _RunState()
    pages_with_discarded_first_result = set()
    max_failures = finder_options.max_failures  # command-line gets priority
    if max_failures is None:
        max_failures = test.max_failures  # may be None

    try:
        test.WillRunTest(finder_options)
        for _ in xrange(finder_options.pageset_repeat):
            for page in pages:
                if test.IsExiting():
                    break
                for _ in xrange(finder_options.page_repeat):
                    results.WillRunPage(page)
                    try:
                        _PrepareAndRunPage(
                            test, page_set, expectations, finder_options, browser_options,
                            page, credentials_path, possible_browser, results, state)
                    finally:
                        # The first result per page may be discarded (warm-up).
                        discard_run = (test.discard_first_result and
                                       page not in pages_with_discarded_first_result)
                        if discard_run:
                            pages_with_discarded_first_result.add(page)
                        results.DidRunPage(page, discard_run=discard_run)
                if max_failures is not None and len(results.failures) > max_failures:
                    logging.error('Too many failures. Aborting.')
                    test.RequestExit()
    finally:
        test.DidRunTest(state.browser, results)
        state.StopBrowser()
def _ShuffleAndFilterPageSet(page_set, finder_options):
    """Return the pages to run, ordered according to *finder_options*.

    A shuffle-order file takes precedence over filtering and shuffling.
    """
    if finder_options.pageset_shuffle_order_file:
        return page_set.ReorderPageSet(finder_options.pageset_shuffle_order_file)

    pages = [page for page in page_set.pages
             if not page.disabled and page_filter.PageFilter.IsSelected(page)]
    if finder_options.pageset_shuffle:
        random.shuffle(pages)
    return pages
def _CheckArchives(page_set, pages, results):
    """Return the subset of *pages* that are local or have a WPR archive.

    Pages missing an archive are reported as failures on *results*; page-set
    wide problems are logged as warnings.
    """
    # Warn of any problems with the entire page set.
    if any(not page.is_local for page in pages):
        if not page_set.archive_data_file:
            logging.warning('The page set is missing an "archive_data_file" '
                            'property. Skipping any live sites. To include them, '
                            'pass the flag --use-live-sites.')
        if not page_set.wpr_archive_info:
            logging.warning('The archive info file is missing. '
                            'To fix this, either add svn-internal to your '
                            '.gclient using http://goto/read-src-internal, '
                            'or create a new archive using record_wpr.')

    # Classify each page: runnable, archive path unknown, or archive file gone.
    pages_missing_archive_path = []
    pages_missing_archive_data = []
    valid_pages = []
    for page in pages:
        if page.is_local:
            valid_pages.append(page)
        elif not page.archive_path:
            pages_missing_archive_path.append(page)
        elif not os.path.isfile(page.archive_path):
            pages_missing_archive_data.append(page)
        else:
            valid_pages.append(page)

    if pages_missing_archive_path:
        logging.warning('The page set archives for some pages do not exist. '
                        'Skipping those pages. To fix this, record those pages '
                        'using record_wpr. To ignore this warning and run '
                        'against live sites, pass the flag --use-live-sites.')
    if pages_missing_archive_data:
        logging.warning('The page set archives for some pages are missing. '
                        'Someone forgot to check them in, or they were deleted. '
                        'Skipping those pages. To fix this, record those pages '
                        'using record_wpr. To ignore this warning and run '
                        'against live sites, pass the flag --use-live-sites.')

    for page in pages_missing_archive_path + pages_missing_archive_data:
        results.WillRunPage(page)
        results.AddValue(failure.FailureValue.FromMessage(
            page, 'Page set archive doesn\'t exist.'))
        results.DidRunPage(page)

    return valid_pages
def _RunPage(test, page, state, expectation, results):
    """Run a single page, translating exceptions into result values.

    Honors 'skip' and 'fail' expectations. Tab/browser-crash exceptions are
    recorded and re-raised so the caller can relaunch the tab/browser.
    """
    if expectation == 'skip':
        logging.debug('Skipping test: Skip expectation for %s', page.url)
        results.AddValue(skip.SkipValue(page, 'Skipped by test expectations'))
        return

    page_state = PageState(page, test.TabForPage(page, state.browser))

    def ProcessError():
        # Expected failures are only logged; unexpected ones become failures.
        if expectation == 'fail':
            msg = 'Expected exception while running %s' % page.url
        else:
            msg = 'Exception while running %s' % page.url
            results.AddValue(failure.FailureValue(page, sys.exc_info()))
        exception_formatter.PrintFormattedException(msg=msg)

    try:
        page_state.PreparePage(test)
        page_state.ImplicitPageNavigation(test)
        test.RunPage(page, page_state.tab, results)
        util.CloseConnections(page_state.tab)
    except page_test.TestNotSupportedOnPlatformFailure:
        raise
    except page_test.Failure:
        if expectation == 'fail':
            exception_formatter.PrintFormattedException(
                msg='Expected failure while running %s' % page.url)
        else:
            exception_formatter.PrintFormattedException(
                msg='Failure while running %s' % page.url)
            results.AddValue(failure.FailureValue(page, sys.exc_info()))
    except (util.TimeoutException, exceptions.LoginException,
            exceptions.ProfilingException):
        ProcessError()
    except (exceptions.TabCrashException, exceptions.BrowserGoneException):
        ProcessError()
        # Run() catches these exceptions to relaunch the tab/browser, so re-raise.
        raise
    except page_action.PageActionNotSupported as e:
        results.AddValue(skip.SkipValue(page, 'Unsupported page action: %s' % e))
    except Exception:
        exception_formatter.PrintFormattedException(
            msg='Unhandled exception while running %s' % page.url)
        results.AddValue(failure.FailureValue(page, sys.exc_info()))
    else:
        if expectation == 'fail':
            logging.warning('%s was expected to fail, but passed.\n', page.url)
    finally:
        page_state.CleanUpPage(test)
def _WaitForThermalThrottlingIfNeeded(platform):
if not platform.CanMonitorThermalThrottling():
return
thermal_throttling_retry = 0
while (platform.IsThermallyThrottled() and
thermal_throttling_retry < 3):
logging.warning('Thermally throttled, waiting (%d)...',
thermal_throttling_retry)
thermal_throttling_retry += 1
time.sleep(thermal_throttling_retry * 2)
if thermal_throttling_retry and platform.IsThermallyThrottled():
logging.warning('Device is thermally throttled before running '
'performance tests, results will vary.')
def _CheckThermalThrottling(platform):
if not platform.CanMonitorThermalThrottling():
return
if platform.HasBeenThermallyThrottled():
logging.warning('Device has been thermally throttled during '
'performance tests, results will vary.')
| bsd-3-clause |
marcusmueller/gnuradio | grc/core/utils/backports/shlex.py | 7 | 1362 | # Copyright 2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
from __future__ import absolute_import
import re
import shlex
# back port from python3
# Matches the first character that is NOT shell-safe; a string with no
# match can be passed to a POSIX shell unquoted.
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def _shlex_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
# Prefer the stdlib implementation when it exists (Python 3.3+); fall back
# to the local backport on older interpreters.
if not hasattr(shlex, 'quote'):
    quote = _shlex_quote
else:
    quote = shlex.quote
# Re-exported so callers can import both helpers from this module.
split = shlex.split
| gpl-3.0 |
iu5team/rms | app/utils/db_utils.py | 1 | 1349 | import abc
import sqlite3
from django.conf import settings
class Connection(object):
    """Process-wide singleton wrapper around a sqlite3 connection."""

    # Cached singleton instance, created lazily by get_connection().
    obj = None
    # Optional override for the database path; None means use Django's
    # default database setting.
    DB_PATH = None

    def __init__(self):
        super(Connection, self).__init__()
        if self.DB_PATH is None:
            self.db_name = settings.DATABASES['default']['NAME']
        else:
            self.db_name = self.DB_PATH
        # check_same_thread=False: the single shared connection may be used
        # from multiple request-handling threads.
        self.conn = sqlite3.connect(
            self.db_name,
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
            check_same_thread=False)

    @classmethod
    def get_connection(cls):
        """Return the shared sqlite3 connection, creating it on first use."""
        if cls.obj is None:
            cls.obj = Connection()
        return cls.obj.conn

    @staticmethod
    def get_cursor_description(cursor):
        """Return the column names of *cursor*'s result set, or None."""
        if cursor.description is None:
            return None
        return [column[0] for column in cursor.description]

    @staticmethod
    def row_to_dict(row, desc):
        """Zip column names from *desc* with the values of *row*."""
        return dict(zip(desc, row))
class DoesNotExist(Exception):
    """Raised when a row lookup in a gateway's table finds nothing.

    Subclasses set GATEWAY_CLASS to the gateway whose TABLE_NAME is
    interpolated into the error message.
    """

    GATEWAY_CLASS = None

    def __init__(self, entity_id=None):
        # Remember which id was looked up, if any, for the message below.
        self.entity_id = entity_id

    def __str__(self):
        table_name = self.GATEWAY_CLASS.TABLE_NAME
        if self.entity_id is None:
            return "Entity of type '{}' does not exists".format(table_name)
        return "Entity of type '{}' with id {} not found".format(table_name, self.entity_id)
| mit |
diging/jars | cookies/tests/test_aggregate.py | 1 | 8457 | import unittest, mock, shutil, tempfile, os
from cookies import aggregate
from cookies.models import *
from django.db import transaction
class TestAggregate(unittest.TestCase):
    """Tests for cookies.aggregate: content aggregation and export helpers."""

    def setUp(self):
        # Fixture: two containers whose primary resources each get three
        # subordinate resources.  The first two subordinates carry external
        # PDF content; the third carries two text/plain sub-subordinates.
        with transaction.atomic():
            self.user = User.objects.create(username='bob')
            self.supercollection = Collection.objects.create(name='supercollection')
            self.collection = Collection.objects.create(name='collection', part_of=self.supercollection)
            self.container = ResourceContainer.objects.create(created_by=self.user, part_of=self.collection)
            self.resource = Resource.objects.create(name='test resource', container=self.container)
            self.container.primary = self.resource
            self.container.save()
            self.container2 = ResourceContainer.objects.create(created_by=self.user, part_of=self.supercollection)
            self.resource2 = Resource.objects.create(name='test resource 2', container=self.container2)
            self.container2.primary = self.resource2
            self.container2.save()
            self.isPartOf, _ = Field.objects.get_or_create(uri='http://purl.org/dc/terms/isPartOf')

        def create_content_resource(resource, url, content_type):
            # Attach an external (web-hosted) content resource to ``resource``.
            content = Resource.objects.create(content_resource=True, is_external=True, external_source=Resource.WEB, location=url, content_type=content_type, container=resource.container)
            ContentRelation.objects.create(for_resource=resource, content_resource=content, content_type=content_type, container=resource.container)

        for resource in [self.resource, self.resource2]:
            for i in xrange(3):
                r = Resource.objects.create(name='subordinate %i' % i, container=resource.container)
                Relation.objects.create(source=r, predicate=self.isPartOf, target=resource, container=resource.container)
                if i < 2:
                    create_content_resource(r, 'http://asdf.com/%i.txt' % i, 'application/pdf')
                    continue
                for j in xrange(2):
                    r2 = Resource.objects.create(name='sub-subordinate %i:%i' % (i, j), container=resource.container)
                    Relation.objects.create(source=r2, predicate=self.isPartOf, target=r, container=resource.container)
                    create_content_resource(r2, 'http://asdf.com/%i_%i.txt' % (i, j), 'text/plain')

    # def test_aggregate_content_resources(self):
    #     """
    #     :func:`cookies.aggregate.aggregate_content_resources` should return a
    #     generator that yields all of the content resources attached to the
    #     passed set of resources.
    #     """
    #     agg = aggregate.aggregate_content_resources(iter([self.resource, self.resource2]))
    #     self.assertEqual(len(list(agg)), Resource.objects.filter(content_resource=True).count())
    #
    #     for obj in aggregate.aggregate_content_resources(Resource.objects.filter(is_primary_for__isnull=False)):
    #         self.assertIsInstance(obj, Resource)
    #         self.assertTrue(obj.content_resource)

    def test_aggregate_content_resources_ctype(self):
        """
        Specifying ``content_type`` will limit to those with the correct
        content type.
        """
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        agg = aggregate.aggregate_content_resources(qs, content_type='text/plain')
        self.assertEqual(len(list(agg)), Resource.objects.filter(content_resource=True, content_type='text/plain').count())
        agg = aggregate.aggregate_content_resources(qs, content_type='application/pdf')
        self.assertEqual(len(list(agg)), Resource.objects.filter(content_resource=True, content_type='application/pdf').count())

    @mock.patch('cookies.accession.WebRemote.get')
    def test_aggregate_content(self, mock_get):
        """
        :func:`cookies.aggregate.aggregate_content_resources` should return a
        generator that yields raw content.
        """
        # Remote fetches are mocked out; every resource "downloads" the same
        # canned payload.
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        agg = aggregate.aggregate_content(qs, content_type='text/plain')
        for raw in agg:
            self.assertEqual(raw, secret_message)

    @mock.patch('cookies.accession.WebRemote.get')
    def test_aggregate_content_with_proc(self, mock_get):
        """
        If ``proc`` is passed, then the return value of that function is
        returned instead.
        """
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        # ``proc`` receives (content, resource) and its result replaces the
        # raw content.
        proc = lambda content, resource: content[:2]
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        agg = aggregate.aggregate_content(qs, proc=proc, content_type='text/plain')
        for raw in agg:
            self.assertEqual(raw, secret_message[:2])

    @mock.patch('cookies.accession.WebRemote.get')
    def test_export(self, mock_get):
        """
        :func:`cookies.aggregate.export` writes the (processed) content of
        each matching resource into files under the target directory.
        """
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        proc = lambda content, resource: content[:2]
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        target_path = tempfile.mkdtemp()
        aggregate.export(qs, target_path, proc=proc, content_type='text/plain')
        for fname in os.listdir(target_path):
            if fname.endswith('.txt'):
                with open(os.path.join(target_path, fname)) as f:
                    self.assertEqual(f.read(), secret_message[:2])
        shutil.rmtree(target_path)

    @mock.patch('cookies.accession.WebRemote.get')
    def test_export_gz(self, mock_get):
        """
        A ``.gz`` filename generator yields gzipped output files that
        smart_open can read back transparently.
        """
        import smart_open
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        proc = lambda content, resource: content[:2]
        # ``fname`` maps each resource to its output file name.
        fname = lambda resource: '%i.txt.gz' % resource.id
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        target_path = tempfile.mkdtemp()
        aggregate.export(qs, target_path, fname=fname, proc=proc,
                         content_type='text/plain')
        for fname in os.listdir(target_path):
            if fname.endswith('.txt.gz'):
                with smart_open.smart_open(os.path.join(target_path, fname)) as f:
                    self.assertEqual(f.read(), secret_message[:2])
        shutil.rmtree(target_path)

    @mock.patch('cookies.accession.WebRemote.get')
    def test_export_zip(self, mock_get):
        """
        :func:`cookies.aggregate.export_zip` bundles the exported files into
        a single zip archive at the target path.
        """
        import smart_open
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        proc = lambda content, resource: content[:2]
        fname = lambda resource: '%i.txt' % resource.id
        qs = Resource.objects.filter(is_primary_for__isnull=False)
        target_path = tempfile.mkdtemp() + 'test.zip'
        aggregate.export_zip(qs, target_path, fname=fname, proc=proc,
                             content_type='text/plain')
        # TODO: actually open and evaluate the archive contents.

    @mock.patch('cookies.accession.WebRemote.get')
    def test_export_collection(self, mock_get):
        """
        File names inside the archive can encode each resource's nested
        collection path.
        """
        import smart_open, os, urlparse
        secret_message = 'nananana, hey hey'
        mock_get.return_value = secret_message
        proc = lambda content, resource: content[:2]

        def fname(resource):
            # Build "<collection path>/<basename>" for the resource,
            # walking up the collection hierarchy to the root.
            def get_collection_name(collection):
                if collection is None:
                    return 'resources'
                return get_collection_name(collection.part_of) + '/' + collection.name
            if resource.is_external:
                filename = urlparse.urlparse(resource.location).path.split('/')[-1]
            else:
                filename = os.path.split(resource.file.path)[-1]
            return get_collection_name(resource.container.part_of) + '/' + filename

        qs = Resource.objects.filter(is_primary_for__isnull=False)
        target_path = tempfile.mkdtemp() + 'test.zip'
        aggregate.export_zip(qs, target_path, fname=fname, proc=proc,
                             content_type='text/plain')
        # TODO: actually open and evaluate the archive contents.

    def tearDown(self):
        # Remove all fixture rows so test cases stay independent.
        for model in [Resource, Relation, ContentRelation, ResourceContainer, User]:
            model.objects.all().delete()
| gpl-3.0 |
cedi4155476/QGIS | python/plugins/processing/algs/qgis/RandomPointsLayer.py | 5 | 4796 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RandomPointsLayer.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import random
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsGeometry, QgsFields, QgsField, QgsSpatialIndex, QgsPoint, QgsFeature, QgsFeatureRequest
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class RandomPointsLayer(GeoAlgorithm):
    """Generate a requested number of random points that fall inside the
    polygons of an input layer, optionally enforcing a minimum distance
    between the generated points."""

    # Parameter and output identifiers.
    VECTOR = 'VECTOR'
    POINT_NUMBER = 'POINT_NUMBER'
    MIN_DISTANCE = 'MIN_DISTANCE'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        # Register the algorithm's name/group and declare inputs and output.
        self.name, self.i18n_name = self.trAlgorithm('Random points in layer bounds')
        self.group, self.i18n_group = self.trAlgorithm('Vector creation tools')
        self.addParameter(ParameterVector(self.VECTOR,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_POLYGON]))
        self.addParameter(ParameterNumber(self.POINT_NUMBER,
                                          self.tr('Points number'), 1, 9999999, 1))
        self.addParameter(ParameterNumber(self.MIN_DISTANCE,
                                          self.tr('Minimum distance'), 0.0, 9999999, 0.0))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Random points')))

    def processAlgorithm(self, progress):
        # Resolve parameter values.
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.VECTOR))
        pointCount = int(self.getParameterValue(self.POINT_NUMBER))
        minDistance = float(self.getParameterValue(self.MIN_DISTANCE))
        bbox = layer.extent()
        # Spatial index over the input polygons, used to find candidate
        # features near each random point cheaply.
        idxLayer = vector.spatialindex(layer)
        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields, QGis.WKBPoint, layer.dataProvider().crs())
        nPoints = 0
        nIterations = 0
        # Give up after 200 attempts per requested point.
        maxIterations = pointCount * 200
        total = 100.0 / pointCount
        # Index and dict of accepted points, for min-distance checking.
        index = QgsSpatialIndex()
        points = dict()
        request = QgsFeatureRequest()
        random.seed()
        while nIterations < maxIterations and nPoints < pointCount:
            # Draw a uniform random point within the layer's bounding box.
            rx = bbox.xMinimum() + bbox.width() * random.random()
            ry = bbox.yMinimum() + bbox.height() * random.random()
            pnt = QgsPoint(rx, ry)
            geom = QgsGeometry.fromPoint(pnt)
            ids = idxLayer.intersects(geom.buffer(5, 5).boundingBox())
            if len(ids) > 0 and \
                    vector.checkMinDistance(pnt, index, minDistance, points):
                for i in ids:
                    f = layer.getFeatures(request.setFilterFid(i)).next()
                    tmpGeom = QgsGeometry(f.geometry())
                    # Accept only if the point lies inside the polygon
                    # itself, not merely inside its bounding box.
                    if geom.within(tmpGeom):
                        f = QgsFeature(nPoints)
                        f.initAttributes(1)
                        f.setFields(fields)
                        f.setAttribute('id', nPoints)
                        f.setGeometry(geom)
                        writer.addFeature(f)
                        index.insertFeature(f)
                        points[nPoints] = pnt
                        nPoints += 1
                        progress.setPercentage(int(nPoints * total))
            nIterations += 1
        if nPoints < pointCount:
            ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
                                   self.tr('Can not generate requested number of random points. '
                                           'Maximum number of attempts exceeded.'))
        # Flush and close the output by releasing the writer.
        del writer
| gpl-2.0 |
nateprewitt/pipenv | pipenv/patched/pip/_vendor/distro.py | 330 | 38349 | # Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` was already
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to Linux distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import subprocess
# This module only makes sense on Linux; fail fast everywhere else.
if not sys.platform.startswith('linux'):
    raise ImportError('Unsupported platform: {0}'.format(sys.platform))

# Directory that holds the os-release file and the distro release files.
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'

#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#:   with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}

#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#:   case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
    'enterpriseenterprise': 'oracle',  # Oracle Enterprise Linux
    'redhatenterpriseworkstation': 'rhel',  # RHEL 6.7
}

#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#:   translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
    'redhat': 'rhel',  # RHEL 6.x, 7.x
}

# Pattern for content of distro release file (reversed).  The line is
# matched against its reversed text so the version and codename, which sit
# at the end of the line, are captured first ('STL ' is ' LTS' reversed,
# 'esaeler' is 'release' reversed).
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
    r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')

# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
    r'(\w+)[-_](release|version)$')

# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
    'debian_version',
    'lsb-release',
    'oem-release',
    _OS_RELEASE_BASENAME,
    'system-release'
)
def linux_distribution(full_distribution_name=True):
    """Return ``(id_name, version, codename)`` for the current distro.

    Interface-compatible with :py:func:`platform.linux_distribution`.
    ``id_name`` is :func:`distro.name` when *full_distribution_name* is
    true, otherwise :func:`distro.id`.  Values may differ from the stdlib
    function because more data sources are consulted and the distro ID is
    normalized.
    """
    return _distro.linux_distribution(full_distribution_name)
def id():
    """Return the machine-readable distro ID (e.g. ``'ubuntu'``, ``'rhel'``).

    The raw ID is taken from the os-release file "ID" attribute, the
    lsb_release "Distributor ID" attribute, or the distro release file
    name — in that order, first non-empty wins.  It is then lower-cased,
    blanks become underscores, and the result is normalized against the
    ``NORMALIZED_*`` tables so the value stays stable across releases of
    the supported distributions.  Report mismatches for listed distros in
    the distro issue tracker.
    """
    return _distro.id()
def name(pretty=False):
    """Return the human-readable distro name.

    With *pretty* false the bare name is returned (e.g. "CentOS Linux");
    with *pretty* true the version and codename are appended (e.g.
    "CentOS Linux 7.1.1503 (Core)").  Sources are tried in order:
    os-release file, lsb_release output, distro release file.
    """
    return _distro.name(pretty)
def version(pretty=False, best=False):
    """Return the distro version as a human-readable string.

    *pretty* appends the codename in parentheses when one is available
    (e.g. "7.0 (Maipo)").  With *best* false, the first non-empty version
    from the prioritized sources (os-release, lsb_release, distro release
    file, then versions parsed out of pretty-name/description strings) is
    returned; with *best* true, the most precise version found in any
    source wins.
    """
    return _distro.version(pretty, best)
def version_parts(best=False):
    """Return the version as a ``(major, minor, build_number)`` tuple.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.version_parts(best)
def major_version(best=False):
    """Return the first dot-separated part of the version, or ''.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.major_version(best)
def minor_version(best=False):
    """Return the second dot-separated part of the version, or ''.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.minor_version(best)
def build_number(best=False):
    """Return the third dot-separated part of the version, or ''.

    See :func:`distro.version` for the meaning of *best*.
    """
    return _distro.build_number(best)
def like():
    """Return the space-separated ``ID_LIKE`` value from the os-release file.

    Lists distro IDs of closely related distributions (typically the ones
    this distro is derived from); only the os-release file provides this
    item.  See the os-release man page for the "ID_LIKE" attribute.
    """
    return _distro.like()
def codename():
    """Return the release codename, or an empty string if there is none.

    The value comes from the "VERSION" field of the os-release file, the
    lsb_release "Codename" attribute, or the distro release file, and is
    passed through verbatim — it is not always a true codename (openSUSE,
    for example, reports "x86_64").
    """
    return _distro.codename()
def info(pretty=False, best=False):
    """Return a machine-readable summary dict for the current distro.

    The dict always has the same shape: ``id``, ``version``,
    ``version_parts`` (itself with ``major``/``minor``/``build_number``),
    ``like`` and ``codename`` — each filled from the accessor function of
    the same name.  See :func:`distro.version` for *pretty* and *best*.
    """
    return _distro.info(pretty, best)
def os_release_info():
    """Return the key-value pairs parsed from the os-release file."""
    return _distro.os_release_info()
def lsb_release_info():
    """Return the key-value pairs parsed from lsb_release command output."""
    return _distro.lsb_release_info()
def distro_release_info():
    """Return the key-value pairs parsed from the distro release file."""
    return _distro.distro_release_info()
def os_release_attr(attribute):
    """Return one item from the os-release file, or '' if absent.

    *attribute* is the key of the information item.
    """
    return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
    """Return one item from the lsb_release output, or '' if absent.

    *attribute* is the key of the information item.
    """
    return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
    """Return one item from the distro release file, or '' if absent.

    *attribute* is the key of the information item.
    """
    return _distro.distro_release_attr(attribute)
class LinuxDistribution(object):
    """
    Provides information about a Linux distribution.

    This package creates a private module-global instance of this class with
    default initialization arguments, that is used by the
    `consolidated accessor functions`_ and `single source accessor functions`_.
    By using default initialization arguments, that module-global instance
    returns data about the current Linux distribution (i.e. the distro this
    package runs on).

    Normally, it is not necessary to create additional instances of this class.
    However, in situations where control is needed over the exact data sources
    that are used, instances of this class can be created with a specific
    distro release file, or a specific os-release file, or without invoking the
    lsb_release command.
    """
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file=''):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self._os_release_info = self._get_os_release_info()
self._lsb_release_info = self._get_lsb_release_info() \
if include_lsb else {}
self._distro_release_info = self._get_distro_release_info()
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={0!r}, " \
"distro_release_file={1!r}, " \
"_os_release_info={2!r}, " \
"_lsb_release_info={3!r}, " \
"_distro_release_info={4!r})".format(
self.os_release_file,
self.distro_release_file,
self._os_release_info,
self._lsb_release_info,
self._distro_release_info)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the Linux distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the Linux distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the Linux distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
def version(self, pretty=False, best=False):
"""
Return the version of the Linux distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', '')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version
def version_parts(self, best=False):
"""
Return the version of the Linux distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the Linux distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the Linux distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the Linux distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the Linux distribution.
For details, see :func:`distro.codename`.
"""
return self.os_release_attr('codename') \
or self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the Linux
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
    def os_release_info(self):
        """
        Return a dictionary containing key-value pairs for the information
        items from the os-release file data source of the Linux distribution.

        The dictionary was built once during construction; the internal
        object itself is returned, not a copy.

        For details, see :func:`distro.os_release_info`.
        """
        return self._os_release_info
    def lsb_release_info(self):
        """
        Return a dictionary containing key-value pairs for the information
        items from the lsb_release command data source of the Linux
        distribution.

        The dictionary was built once during construction (empty if LSB data
        collection was disabled); the internal object itself is returned,
        not a copy.

        For details, see :func:`distro.lsb_release_info`.
        """
        return self._lsb_release_info
    def distro_release_info(self):
        """
        Return a dictionary containing key-value pairs for the information
        items from the distro release file data source of the Linux
        distribution.

        The dictionary was built once during construction; the internal
        object itself is returned, not a copy.

        For details, see :func:`distro.distro_release_info`.
        """
        return self._distro_release_info
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the Linux distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the Linux distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the Linux distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
def _get_os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
def _parse_os_release_content(lines):
"""
Parse the lines of an os-release file.
Parameters:
* lines: Iterable through the lines in the os-release file.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
# comments processed, quotes and backslash escape sequences
# processed, multi-line values assembled, trailing newlines
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
if '=' in token:
k, v = token.split('=', 1)
if isinstance(v, bytes):
v = v.decode('utf-8')
props[k.lower()] = v
if k == 'VERSION':
# this handles cases in which the codename is in
# the `(CODENAME)` (rhel, centos, fedora) format
# or in the `, CODENAME` format (Ubuntu).
codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
if codename:
codename = codename.group()
codename = codename.strip('()')
codename = codename.strip(',')
codename = codename.strip()
# codename appears within paranthese.
props['codename'] = codename
else:
props['codename'] = ''
else:
# Ignore any tokens that are not variable assignments
pass
return props
def _get_lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
cmd = 'lsb_release -a'
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
code = process.returncode
if code == 0:
content = stdout.splitlines()
return self._parse_lsb_release_content(content)
elif code == 127: # Command not found
return {}
else:
if sys.version_info[:2] >= (3, 5):
raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
elif sys.version_info[:2] >= (2, 7):
raise subprocess.CalledProcessError(code, cmd, stdout)
elif sys.version_info[:2] == (2, 6):
raise subprocess.CalledProcessError(code, cmd)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
for line in lines:
line = line.decode('utf-8') if isinstance(line, bytes) else line
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
    def _get_distro_release_info(self):
        """
        Get the information items from the specified distro release file.

        If no distro release file was specified, scan ``_UNIXCONFDIR`` for a
        suitable ``*-release`` file instead; when one is found and parsed
        successfully, ``self.distro_release_file`` is updated to point at it
        (intentional side effect).

        Returns:
            A dictionary containing all information items, or {} if no
            usable file was found.
        """
        if self.distro_release_file:
            # If it was specified, we use it and parse what we can, even if
            # its file name or content does not match the expected pattern.
            distro_info = self._parse_distro_release_file(
                self.distro_release_file)
            basename = os.path.basename(self.distro_release_file)
            # The file name pattern for user-specified distro release files
            # is somewhat more tolerant (compared to when searching for the
            # file), because we want to use what was specified as best as
            # possible.
            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
            if match:
                distro_info['id'] = match.group(1)
            return distro_info
        else:
            basenames = os.listdir(_UNIXCONFDIR)
            # We sort for repeatability in cases where there are multiple
            # distro specific files; e.g. CentOS, Oracle, Enterprise all
            # containing `redhat-release` on top of their own.
            basenames.sort()
            for basename in basenames:
                if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
                    continue
                match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
                if match:
                    filepath = os.path.join(_UNIXCONFDIR, basename)
                    distro_info = self._parse_distro_release_file(filepath)
                    if 'name' in distro_info:
                        # The name is always present if the pattern matches;
                        # accept this file as the distro release data source.
                        self.distro_release_file = filepath
                        distro_info['id'] = match.group(1)
                        return distro_info
            return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(filepath):
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
return {}
    @staticmethod
    def _parse_distro_release_content(line):
        """
        Parse a line from a distro release file.

        The stripped line is matched in *reversed* form against
        ``_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN`` (hence the ``[::-1]``
        slices below), so the captured groups come out reversed as well:
        group 3 holds the name, group 2 the version, group 1 the codename.

        Parameters:
        * line: Line from the distro release file. Must be a unicode string
          or a UTF-8 encoded byte string.

        Returns:
            A dictionary containing all information items.
        """
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        # Match against the reversed line; see the docstring for why.
        matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
            line.strip()[::-1])
        distro_info = {}
        if matches:
            # regexp ensures non-None; reverse each group back to normal.
            distro_info['name'] = matches.group(3)[::-1]
            if matches.group(2):
                distro_info['version_id'] = matches.group(2)[::-1]
            if matches.group(1):
                distro_info['codename'] = matches.group(1)[::-1]
        elif line:
            # No structured match: fall back to the whole line as the name.
            distro_info['name'] = line.strip()
        return distro_info
# Module-level singleton, built with default data sources; presumably the
# module's convenience functions delegate to it (their definitions are not
# visible here -- confirm against the rest of the module).
_distro = LinuxDistribution()
def main():
    """Command-line entry point: log distro information to stdout."""
    import argparse

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    parser = argparse.ArgumentParser(description="Linux distro info tool")
    parser.add_argument(
        '--json',
        '-j',
        help="Output in machine readable format",
        action="store_true")
    args = parser.parse_args()

    if args.json:
        logger.info(json.dumps(info(), indent=4, sort_keys=True))
        return

    # Human-readable output: omit version/codename lines when unknown.
    logger.info('Name: %s', name(pretty=True))
    dist_version = version(pretty=True)
    if dist_version:
        logger.info('Version: %s', dist_version)
    dist_codename = codename()
    if dist_codename:
        logger.info('Codename: %s', dist_codename)


if __name__ == '__main__':
    main()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.