repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
flavour/Turkey | modules/tests/staff/staff_report.py | 24 | 3843 | # -*- coding: utf-8 -*-
""" Sahana Eden Staff Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
class StaffReport(SeleniumUnitTest):
"""
@case: hrm008
@description: Staff Report
"""
def setUp(self):
super(StaffReport, self).setUp()
print "\n"
self.login(account="admin", nexturl="hrm/staff/report")
self.settings = current.deployment_settings
def test_staff_report_simple(self):
self.report(None,
self.settings.get_hrm_organisation_label(),
"County / District",
self.settings.get_hrm_organisation_label() + " (Count)",
("Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste) (CVTL)", "Ainaro", 1),
("Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste) (CVTL)", "Kuala Lumpur", 0),
)
def test_staff_report_filter(self):
self.report(
({
"name": "human_resource_search_select_organisation_id",
"label": "Timor-Leste Red Cross Society (CVTL)",
"value": True
},), self.settings.get_hrm_organisation_label(),
"County / District",
self.settings.get_hrm_organisation_label() + " (Count)",
row_count=1)
def test_staff_report_filter_L0_L1(self):
self.report(
({
"name": "human_resource_search_select_location_id$L0",
"label": "Timor-Leste",
"value": True
},
{
"name": "human_resource_search_select_location_id$L2",
"label": "Ainaro",
"value": True
}),
"County / District",
self.settings.get_hrm_organisation_label(),
self.settings.get_hrm_organisation_label() + " (Count)",
row_count=1)
def test_staff_report_person(self):
self.report(None,
self.settings.get_hrm_organisation_label(),
"State / Province",
"Person (List)",
("Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste) (CVTL)", "Dili",
("Duarte Botelheiro",
"Adriana Macedo",
"Quito Cromos",
"Guilherme Soares",
"Xanana Chilro",
u"José Saboga",
"Elly Marques",
"Nilton Moniz",
"Herculano Ximenes",
"Goku Gohan")
))
| mit |
tarzasai/Flexget | flexget/plugins/input/from_imdb.py | 3 | 12082 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import collections
import logging
from jsonschema.compat import str_types
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.cached_input import cached
log = logging.getLogger('from_imdb')
class FromIMDB(object):
"""
This plugin enables generating entries based on an entity, an entity being a person, character or company.
It's based on IMDBpy which is required (pip install imdbpy). The basic config required just an IMDB ID of the
required entity.
For example:
from_imdb: ch0001354
Schema description:
Other than ID, all other properties are meant to filter the full list that the entity generates.
id: string that relates to a supported entity type. For example: 'nm0000375'. Required.
job_types: a string or list with job types from job_types. Default is 'actor'.
content_types: A string or list with content types from content_types. Default is 'movie'.
max_entries: The maximum number of entries that can return. This value's purpose is basically flood protection
against unruly configurations that will return too many results. Default is 200.
Advanced config example:
dynamic_movie_queue:
from_imdb:
id: co0051941
job_types:
- actor
- director
content_types: tv series
accept_all: yes
movie_queue: add
"""
job_types = ['actor', 'actress', 'director', 'producer', 'writer', 'self', 'editor', 'miscellaneous',
'editorial department', 'cinematographer', 'visual effects', 'thanks', 'music department',
'in development', 'archive footage', 'soundtrack']
content_types = ['movie', 'tv series', 'tv mini series', 'video game', 'video movie', 'tv movie', 'episode']
content_type_conversion = {
'movie': 'movie',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video game'
}
character_content_type_conversion = {
'movie': 'feature',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video-game',
}
jobs_without_content_type = ['actor', 'actress', 'self', 'in development', 'archive footage']
imdb_pattern = one_or_more({'type': 'string',
'pattern': r'(nm|co|ch)\d{7}',
'error_pattern': 'Get the id from the url of the person/company you want to use,'
' e.g. http://imdb.com/text/<id here>/blah'}, unique_items=True)
schema = {
'oneOf': [
imdb_pattern,
{'type': 'object',
'properties': {
'id': imdb_pattern,
'job_types': one_or_more({'type': 'string', 'enum': job_types}, unique_items=True),
'content_types': one_or_more({'type': 'string', 'enum': content_types}, unique_items=True),
'max_entries': {'type': 'integer'},
'match_type': {'type': 'string', 'enum': ['strict', 'loose']}
},
'required': ['id'],
'additionalProperties': False
}
],
}
def prepare_config(self, config):
"""
Converts config to dict form and sets defaults if needed
"""
config = config
if isinstance(config, basestring):
config = {'id': [config]}
elif isinstance(config, list):
config = {'id': config}
if isinstance(config, dict) and not isinstance(config['id'], list):
config['id'] = [config['id']]
config.setdefault('content_types', [self.content_types[0]])
config.setdefault('job_types', [self.job_types[0]])
config.setdefault('max_entries', 200)
config.setdefault('match_type', 'strict')
if isinstance(config.get('content_types'), str_types):
log.debug('Converted content type from string to list.')
config['content_types'] = [config['content_types']]
if isinstance(config['job_types'], str_types):
log.debug('Converted job type from string to list.')
config['job_types'] = [config['job_types']]
# Special case in case user meant to add actress instead of actor (different job types in IMDB)
if 'actor' in config['job_types'] and 'actress' not in config['job_types']:
config['job_types'].append('actress')
return config
def get_items(self, config):
items = []
for id in config['id']:
try:
entity_type, entity_object = self.get_entity_type_and_object(id)
except Exception as e:
log.error(
'Could not resolve entity via ID: {}. '
'Either error in config or unsupported entity. Error:{}'.format(id, e))
continue
items += self.get_items_by_entity(entity_type, entity_object, config.get('content_types'),
config.get('job_types'), config.get('match_type'))
return set(items)
def get_entity_type_and_object(self, imdb_id):
"""
Return a tuple of entity type and entity object
:param imdb_id: string which contains IMDB id
:return: entity type, entity object (person, company, etc.)
"""
if imdb_id.startswith('nm'):
person = self.ia.get_person(imdb_id[2:])
log.info('Starting to retrieve items for person: %s' % person)
return 'Person', person
elif imdb_id.startswith('co'):
company = self.ia.get_company(imdb_id[2:])
log.info('Starting to retrieve items for company: %s' % company)
return 'Company', company
elif imdb_id.startswith('ch'):
character = self.ia.get_character(imdb_id[2:])
log.info('Starting to retrieve items for Character: %s' % character)
return 'Character', character
def get_items_by_entity(self, entity_type, entity_object, content_types, job_types, match_type):
"""
Gets entity object and return movie list using relevant method
"""
if entity_type == 'Company':
return self.items_by_company(entity_object)
if entity_type == 'Character':
return self.items_by_character(entity_object, content_types, match_type)
elif entity_type == 'Person':
return self.items_by_person(entity_object, job_types, content_types, match_type)
def flatten_list(self, _list):
"""
Gets a list of lists and returns a flat list
"""
for el in _list:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in self.flatten_list(el):
yield sub
else:
yield el
def flat_list(self, non_flat_list, remove_none=False):
flat_list = self.flatten_list(non_flat_list)
if remove_none:
flat_list = [_f for _f in flat_list if _f]
return flat_list
def filtered_items(self, unfiltered_items, content_types, match_type):
items = []
unfiltered_items = set(unfiltered_items)
for item in sorted(unfiltered_items):
if match_type == 'strict':
log.debug('Match type is strict, verifying item type to requested content types')
self.ia.update(item)
if item['kind'] in content_types:
log.verbose('Adding item "{}" to list. Item kind is "{}"'.format(item, item['kind']))
items.append(item)
else:
log.verbose('Rejecting item "{}". Item kind is "{}'.format(item, item['kind']))
else:
log.debug('Match type is loose, all items are being added')
items.append(item)
return items
def items_by_person(self, person, job_types, content_types, match_type):
"""
Return item list for a person object
"""
unfiltered_items = self.flat_list(
[self.items_by_job_type(person, job_type, content_types) for job_type in job_types],
remove_none=True)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_content_type(self, person, job_type, content_type):
return [_f for _f in (person.get(job_type + ' ' + self.content_type_conversion[content_type], [])) if _f]
def items_by_job_type(self, person, job_type, content_types):
items = person.get(job_type, []) if job_type in self.jobs_without_content_type else [
person.get(job_type + ' ' + 'documentary', []) and
person.get(job_type + ' ' + 'short', []) and
self.items_by_content_type(person, job_type, content_type)
if content_type == 'movie'
else
self.items_by_content_type(person, job_type, content_type)
for content_type in content_types
]
return [_f for _f in items if _f]
def items_by_character(self, character, content_types, match_type):
"""
Return items list for a character object
:param character: character object
:param content_types: content types as defined in config
:return:
"""
unfiltered_items = self.flat_list(
[character.get(self.character_content_type_conversion[content_type])
for content_type in content_types], remove_none=True)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_company(self, company):
"""
Return items list for a company object
:param company: company object
:return: company items list
"""
return company.get('production companies')
@cached('from_imdb', persist='2 hours')
def on_task_input(self, task, config):
try:
from imdb import IMDb
self.ia = IMDb()
except ImportError:
log.error('IMDBPY is required for this plugin. Please install using "pip install imdbpy"')
return
entries = []
config = self.prepare_config(config)
items = self.get_items(config)
if not items:
log.error('Could not get IMDB item list, check your configuration.')
return
for item in items:
entry = Entry(title=item['title'],
imdb_id='tt' + self.ia.get_imdbID(item),
url='',
imdb_url=self.ia.get_imdbURL(item))
if entry.isvalid():
if entry not in entries:
entries.append(entry)
if entry and task.options.test:
log.info("Test mode. Entry includes:")
for key, value in list(entry.items()):
log.info(' {}: {}'.format(key.capitalize(), value))
else:
log.error('Invalid entry created? %s' % entry)
if len(entries) <= config.get('max_entries'):
return entries
else:
log.warning(
'Number of entries (%s) exceeds maximum allowed value %s. '
'Edit your filters or raise the maximum value by entering a higher "max_entries"' % (
len(entries), config.get('max_entries')))
return
@event('plugin.register')
def register_plugin():
plugin.register(FromIMDB, 'from_imdb', api_ver=2)
| mit |
korfuri/django-prometheus | django_prometheus/models.py | 1 | 1478 | from prometheus_client import Counter
from django_prometheus.conf import NAMESPACE
model_inserts = Counter(
"django_model_inserts_total",
"Number of insert operations by model.",
["model"],
namespace=NAMESPACE,
)
model_updates = Counter(
"django_model_updates_total",
"Number of update operations by model.",
["model"],
namespace=NAMESPACE,
)
model_deletes = Counter(
"django_model_deletes_total",
"Number of delete operations by model.",
["model"],
namespace=NAMESPACE,
)
def ExportModelOperationsMixin(model_name):
"""Returns a mixin for models to export counters for lifecycle operations.
Usage:
class User(ExportModelOperationsMixin('user'), Model):
...
"""
# Force create the labels for this model in the counters. This
# is not necessary but it avoids gaps in the aggregated data.
model_inserts.labels(model_name)
model_updates.labels(model_name)
model_deletes.labels(model_name)
class Mixin:
def _do_insert(self, *args, **kwargs):
model_inserts.labels(model_name).inc()
return super()._do_insert(*args, **kwargs)
def _do_update(self, *args, **kwargs):
model_updates.labels(model_name).inc()
return super()._do_update(*args, **kwargs)
def delete(self, *args, **kwargs):
model_deletes.labels(model_name).inc()
return super().delete(*args, **kwargs)
return Mixin
| apache-2.0 |
wikimedia/operations-debs-python-kafka | kafka/conn.py | 1 | 65182 | from __future__ import absolute_import, division
import collections
import copy
import errno
import io
import logging
from random import shuffle, uniform
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from kafka.vendor import selectors34 as selectors
import socket
import struct
import sys
import threading
import time
from kafka.vendor import six
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.oauth.abstract import AbstractTokenProvider
from kafka.protocol.admin import SaslHandShakeRequest
from kafka.protocol.commit import OffsetFetchRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.parser import KafkaProtocol
from kafka.protocol.types import Int32, Int8
from kafka.version import __version__
if six.PY2:
ConnectionError = socket.error
TimeoutError = socket.error
BlockingIOError = Exception
log = logging.getLogger(__name__)
DEFAULT_KAFKA_PORT = 9092
SASL_QOP_AUTH = 1
SASL_QOP_AUTH_INT = 2
SASL_QOP_AUTH_CONF = 4
try:
import ssl
ssl_available = True
try:
SSLEOFError = ssl.SSLEOFError
SSLWantReadError = ssl.SSLWantReadError
SSLWantWriteError = ssl.SSLWantWriteError
SSLZeroReturnError = ssl.SSLZeroReturnError
except AttributeError:
# support older ssl libraries
log.warning('Old SSL module detected.'
' SSL error handling may not operate cleanly.'
' Consider upgrading to Python 3.3 or 2.7.9')
SSLEOFError = ssl.SSLError
SSLWantReadError = ssl.SSLError
SSLWantWriteError = ssl.SSLError
SSLZeroReturnError = ssl.SSLError
except ImportError:
# support Python without ssl libraries
ssl_available = False
class SSLWantReadError(Exception):
pass
class SSLWantWriteError(Exception):
pass
# needed for SASL_GSSAPI authentication:
try:
import gssapi
from gssapi.raw.misc import GSSError
except ImportError:
#no gssapi available, will disable gssapi mechanism
gssapi = None
GSSError = None
AFI_NAMES = {
socket.AF_UNSPEC: "unspecified",
socket.AF_INET: "IPv4",
socket.AF_INET6: "IPv6",
}
class ConnectionStates(object):
DISCONNECTING = '<disconnecting>'
DISCONNECTED = '<disconnected>'
CONNECTING = '<connecting>'
HANDSHAKE = '<handshake>'
CONNECTED = '<connected>'
AUTHENTICATING = '<authenticating>'
class BrokerConnection(object):
"""Initialize a Kafka broker connection
Keyword Arguments:
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. To avoid connection storms, a
randomization factor of 0.2 will be applied to the backoff
resulting in a random range between 20% below and 20% above
the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
default: True.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: None.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: None.
ssl_keyfile (str): optional filename containing the client private key.
default: None.
ssl_password (callable, str, bytes, bytearray): optional password or
callable function that returns a password, for decrypting the
client private key. Default: None.
ssl_crlfile (str): optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use.
Accepted values are: (0, 8, 0), (0, 8, 1), (0, 8, 2), (0, 9),
(0, 10). Default: (0, 8, 2)
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER.
sasl_plain_username (str): username for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_plain_password (str): password for sasl PLAIN authentication.
Required if sasl_mechanism is PLAIN.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
'client_id': 'kafka-python-' + __version__,
'node_id': 0,
'request_timeout_ms': 30000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': (0, 8, 2), # default to most restrictive
'selector': selectors.DefaultSelector,
'state_change_callback': lambda node_id, sock, conn: True,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
SECURITY_PROTOCOLS = ('PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL')
SASL_MECHANISMS = ('PLAIN', 'GSSAPI', 'OAUTHBEARER')
def __init__(self, host, port, afi, **configs):
self.host = host
self.port = port
self.afi = afi
self._sock_afi = afi
self._sock_addr = None
self._api_versions = None
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self.node_id = self.config.pop('node_id')
if self.config['receive_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_RCVBUF,
self.config['receive_buffer_bytes']))
if self.config['send_buffer_bytes'] is not None:
self.config['socket_options'].append(
(socket.SOL_SOCKET, socket.SO_SNDBUF,
self.config['send_buffer_bytes']))
assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
'security_protcol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
assert ssl_available, "Python wasn't built with SSL support"
if self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL'):
assert self.config['sasl_mechanism'] in self.SASL_MECHANISMS, (
'sasl_mechanism must be in ' + ', '.join(self.SASL_MECHANISMS))
if self.config['sasl_mechanism'] == 'PLAIN':
assert self.config['sasl_plain_username'] is not None, 'sasl_plain_username required for PLAIN sasl'
assert self.config['sasl_plain_password'] is not None, 'sasl_plain_password required for PLAIN sasl'
if self.config['sasl_mechanism'] == 'GSSAPI':
assert gssapi is not None, 'GSSAPI lib not available'
assert self.config['sasl_kerberos_service_name'] is not None, 'sasl_kerberos_service_name required for GSSAPI sasl'
if self.config['sasl_mechanism'] == 'OAUTHBEARER':
token_provider = self.config['sasl_oauth_token_provider']
assert token_provider is not None, 'sasl_oauth_token_provider required for OAUTHBEARER sasl'
assert callable(getattr(token_provider, "token", None)), 'sasl_oauth_token_provider must implement method #token()'
# This is not a general lock / this class is not generally thread-safe yet
# However, to avoid pushing responsibility for maintaining
# per-connection locks to the upstream client, we will use this lock to
# make sure that access to the protocol buffer is synchronized
# when sends happen on multiple threads
self._lock = threading.Lock()
# the protocol parser instance manages actual tracking of the
# sequence of in-flight requests to responses, which should
# function like a FIFO queue. For additional request data,
# including tracking request futures and timestamps, we
# can use a simple dictionary of correlation_id => request data
self.in_flight_requests = dict()
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
self.state = ConnectionStates.DISCONNECTED
self._reset_reconnect_backoff()
self._sock = None
self._send_buffer = b''
self._ssl_context = None
if self.config['ssl_context'] is not None:
self._ssl_context = self.config['ssl_context']
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = []
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
def _dns_lookup(self):
self._gai = dns_lookup(self.host, self.port, self.afi)
if not self._gai:
log.error('DNS lookup failed for %s:%i (%s)',
self.host, self.port, self.afi)
return False
return True
def _next_afi_sockaddr(self):
if not self._gai:
if not self._dns_lookup():
return
afi, _, __, ___, sockaddr = self._gai.pop(0)
return (afi, sockaddr)
def connect_blocking(self, timeout=float('inf')):
if self.connected():
return True
timeout += time.time()
# First attempt to perform dns lookup
# note that the underlying interface, socket.getaddrinfo,
# has no explicit timeout so we may exceed the user-specified timeout
self._dns_lookup()
# Loop once over all returned dns entries
selector = None
while self._gai:
while time.time() < timeout:
self.connect()
if self.connected():
if selector is not None:
selector.close()
return True
elif self.connecting():
if selector is None:
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_WRITE)
selector.select(1)
elif self.disconnected():
if selector is not None:
selector.close()
selector = None
break
else:
break
return False
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED and not self.blacked_out():
self.last_attempt = time.time()
next_lookup = self._next_afi_sockaddr()
if not next_lookup:
self.close(Errors.KafkaConnectionError('DNS failure'))
return self.state
else:
log.debug('%s: creating new socket', self)
assert self._sock is None
self._sock_afi, self._sock_addr = next_lookup
self._sock = socket.socket(self._sock_afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
self.state = ConnectionStates.CONNECTING
self.config['state_change_callback'](self.node_id, self._sock, self)
log.info('%s: connecting to %s:%d [%s %s]', self, self.host,
self.port, self._sock_addr, AFI_NAMES[self._sock_afi])
if self.state is ConnectionStates.CONNECTING:
# in non-blocking mode, use repeated calls to socket.connect_ex
# to check connection status
ret = None
try:
ret = self._sock.connect_ex(self._sock_addr)
except socket.error as err:
ret = err.errno
# Connection succeeded
if not ret or ret == errno.EISCONN:
log.debug('%s: established TCP connection', self)
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
log.debug('%s: initiating SSL handshake', self)
self.state = ConnectionStates.HANDSHAKE
self.config['state_change_callback'](self.node_id, self._sock, self)
# _wrap_ssl can alter the connection state -- disconnects on failure
self._wrap_ssl()
elif self.config['security_protocol'] == 'SASL_PLAINTEXT':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
self.config['state_change_callback'](self.node_id, self._sock, self)
else:
# security_protocol PLAINTEXT
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
# Connection failed
# WSAEINVAL == 10022, but errno.WSAEINVAL is not available on non-win systems
elif ret not in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK, 10022):
log.error('Connect attempt to %s returned error %s.'
' Disconnecting.', self, ret)
errstr = errno.errorcode.get(ret, 'UNKNOWN')
self.close(Errors.KafkaConnectionError('{} {}'.format(ret, errstr)))
return self.state
# Needs retry
else:
pass
if self.state is ConnectionStates.HANDSHAKE:
if self._try_handshake():
log.debug('%s: completed SSL handshake.', self)
if self.config['security_protocol'] == 'SASL_SSL':
log.debug('%s: initiating SASL authentication', self)
self.state = ConnectionStates.AUTHENTICATING
else:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state is ConnectionStates.AUTHENTICATING:
assert self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL')
if self._try_authenticate():
# _try_authenticate has side-effects: possibly disconnected on socket errors
if self.state is ConnectionStates.AUTHENTICATING:
log.info('%s: Connection complete.', self)
self.state = ConnectionStates.CONNECTED
self._reset_reconnect_backoff()
self.config['state_change_callback'](self.node_id, self._sock, self)
if self.state not in (ConnectionStates.CONNECTED,
ConnectionStates.DISCONNECTED):
# Connection timed out
request_timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() > request_timeout + self.last_attempt:
log.error('Connection attempt to %s timed out', self)
self.close(Errors.KafkaConnectionError('timeout'))
return self.state
return self.state
def _wrap_ssl(self):
assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
if self._ssl_context is None:
log.debug('%s: configuring default SSL Context', self)
self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv2 # pylint: disable=no-member
self._ssl_context.options |= ssl.OP_NO_SSLv3 # pylint: disable=no-member
self._ssl_context.verify_mode = ssl.CERT_OPTIONAL
if self.config['ssl_check_hostname']:
self._ssl_context.check_hostname = True
if self.config['ssl_cafile']:
log.info('%s: Loading SSL CA from %s', self, self.config['ssl_cafile'])
self._ssl_context.load_verify_locations(self.config['ssl_cafile'])
self._ssl_context.verify_mode = ssl.CERT_REQUIRED
else:
log.info('%s: Loading system default SSL CAs from %s', self, ssl.get_default_verify_paths())
self._ssl_context.load_default_certs()
if self.config['ssl_certfile'] and self.config['ssl_keyfile']:
log.info('%s: Loading SSL Cert from %s', self, self.config['ssl_certfile'])
log.info('%s: Loading SSL Key from %s', self, self.config['ssl_keyfile'])
self._ssl_context.load_cert_chain(
certfile=self.config['ssl_certfile'],
keyfile=self.config['ssl_keyfile'],
password=self.config['ssl_password'])
if self.config['ssl_crlfile']:
if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'):
raise RuntimeError('This version of Python does not support ssl_crlfile!')
log.info('%s: Loading SSL CRL from %s', self, self.config['ssl_crlfile'])
self._ssl_context.load_verify_locations(self.config['ssl_crlfile'])
# pylint: disable=no-member
self._ssl_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
if self.config['ssl_ciphers']:
log.info('%s: Setting SSL Ciphers: %s', self, self.config['ssl_ciphers'])
self._ssl_context.set_ciphers(self.config['ssl_ciphers'])
log.debug('%s: wrapping socket in ssl context', self)
try:
self._sock = self._ssl_context.wrap_socket(
self._sock,
server_hostname=self.host,
do_handshake_on_connect=False)
except ssl.SSLError as e:
log.exception('%s: Failed to wrap socket in SSLContext!', self)
self.close(e)
    def _try_handshake(self):
        """Attempt to complete the non-blocking TLS handshake.

        Returns True once the handshake has finished; False while the
        socket is still waiting for readable/writable events. If the peer
        closes the channel mid-handshake, the connection is closed.
        """
        # Handshake only applies when TLS is in use
        assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
        try:
            self._sock.do_handshake()
            return True
        # old ssl in python2.6 will swallow all SSLErrors here...
        except (SSLWantReadError, SSLWantWriteError):
            # Non-blocking socket not ready yet -- retry on next poll
            pass
        except (SSLZeroReturnError, ConnectionError, TimeoutError, SSLEOFError):
            # Peer closed the TLS channel during handshake; tear down
            log.warning('SSL connection closed by server during handshake.')
            self.close(Errors.KafkaConnectionError('SSL connection closed by server during handshake'))
        # Other SSLErrors will be raised to user

        return False
    def _try_authenticate(self):
        """Drive the SASL authentication exchange.

        On first call sends a SaslHandShakeRequest; subsequent calls pump
        recv() until the auth future resolves. Returns True on success,
        False while pending; non-connection errors are re-raised.
        """
        # SASL requires broker >= 0.10 (api_version None means auto-probe)
        assert self.config['api_version'] is None or self.config['api_version'] >= (0, 10)

        if self._sasl_auth_future is None:
            # Build a SaslHandShakeRequest message
            request = SaslHandShakeRequest[0](self.config['sasl_mechanism'])
            future = Future()
            sasl_response = self._send(request)
            # The handshake response callback performs the mechanism-specific
            # authentication and resolves `future`
            sasl_response.add_callback(self._handle_sasl_handshake_response, future)
            sasl_response.add_errback(lambda f, e: f.failure(e), future)
            self._sasl_auth_future = future

        # Pump the network to deliver any pending responses to their futures
        for r, f in self.recv():
            f.success(r)

        # A connection error could trigger close() which will reset the future
        if self._sasl_auth_future is None:
            return False
        elif self._sasl_auth_future.failed():
            ex = self._sasl_auth_future.exception
            # Connection errors are handled by retry logic; anything else
            # (e.g. authentication failure) is surfaced to the caller
            if not isinstance(ex, Errors.KafkaConnectionError):
                raise ex  # pylint: disable-msg=raising-bad-type
        return self._sasl_auth_future.succeeded()
def _handle_sasl_handshake_response(self, future, response):
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
error = error_type(self)
self.close(error=error)
return future.failure(error_type(self))
if self.config['sasl_mechanism'] not in response.enabled_mechanisms:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'Kafka broker does not support %s sasl mechanism. Enabled mechanisms are: %s'
% (self.config['sasl_mechanism'], response.enabled_mechanisms)))
elif self.config['sasl_mechanism'] == 'PLAIN':
return self._try_authenticate_plain(future)
elif self.config['sasl_mechanism'] == 'GSSAPI':
return self._try_authenticate_gssapi(future)
elif self.config['sasl_mechanism'] == 'OAUTHBEARER':
return self._try_authenticate_oauth(future)
else:
return future.failure(
Errors.UnsupportedSaslMechanismError(
'kafka-python does not support SASL mechanism %s' %
self.config['sasl_mechanism']))
def _send_bytes(self, data):
"""Send some data via non-blocking IO
Note: this method is not synchronized internally; you should
always hold the _lock before calling
Returns: number of bytes
Raises: socket exception
"""
total_sent = 0
while total_sent < len(data):
try:
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
except (SSLWantReadError, SSLWantWriteError):
break
except (ConnectionError, TimeoutError) as e:
if six.PY2 and e.errno == errno.EWOULDBLOCK:
break
raise
except BlockingIOError:
if six.PY3:
break
raise
return total_sent
def _send_bytes_blocking(self, data):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
total_sent = 0
try:
while total_sent < len(data):
sent_bytes = self._sock.send(data[total_sent:])
total_sent += sent_bytes
if total_sent != len(data):
raise ConnectionError('Buffer overrun during socket send')
return total_sent
finally:
self._sock.settimeout(0.0)
def _recv_bytes_blocking(self, n):
self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
try:
data = b''
while len(data) < n:
fragment = self._sock.recv(n - len(data))
if not fragment:
raise ConnectionError('Connection reset during recv')
data += fragment
return data
finally:
self._sock.settimeout(0.0)
    def _try_authenticate_plain(self, future):
        """Authenticate with SASL/PLAIN (RFC 4616) over the open socket.

        Resolves ``future`` with True on success or an error on failure.
        """
        if self.config['security_protocol'] == 'SASL_PLAINTEXT':
            log.warning('%s: Sending username and password in the clear', self)

        data = b''
        # Send PLAIN credentials per RFC-4616
        # Format is authzid NUL authcid NUL passwd; the username appears
        # twice on purpose (authorization id == authentication id)
        msg = bytes('\0'.join([self.config['sasl_plain_username'],
                               self.config['sasl_plain_username'],
                               self.config['sasl_plain_password']]).encode('utf-8'))
        size = Int32.encode(len(msg))

        err = None
        close = False
        with self._lock:
            if not self._can_send_recv():
                # Not ready: fail the future but leave the connection alone
                err = Errors.NodeNotReadyError(str(self))
                close = False
            else:
                try:
                    self._send_bytes_blocking(size + msg)

                    # The server will send a zero sized message (that is Int32(0)) on success.
                    # The connection is closed on failure
                    data = self._recv_bytes_blocking(4)

                except (ConnectionError, TimeoutError) as e:
                    log.exception("%s: Error receiving reply from server", self)
                    err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                    close = True

        if err is not None:
            if close:
                self.close(error=err)
            return future.failure(err)

        if data != b'\x00\x00\x00\x00':
            error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
            return future.failure(error)

        log.info('%s: Authenticated as %s via PLAIN', self, self.config['sasl_plain_username'])
        return future.success(True)
def _try_authenticate_gssapi(self, future):
kerberos_damin_name = self.config['sasl_kerberos_domain_name'] or self.host
auth_id = self.config['sasl_kerberos_service_name'] + '@' + kerberos_damin_name
gssapi_name = gssapi.Name(
auth_id,
name_type=gssapi.NameType.hostbased_service
).canonicalize(gssapi.MechType.kerberos)
log.debug('%s: GSSAPI name: %s', self, gssapi_name)
err = None
close = False
with self._lock:
if not self._can_send_recv():
err = Errors.NodeNotReadyError(str(self))
close = False
else:
# Establish security context and negotiate protection level
# For reference RFC 2222, section 7.2.1
try:
# Exchange tokens until authentication either succeeds or fails
client_ctx = gssapi.SecurityContext(name=gssapi_name, usage='initiate')
received_token = None
while not client_ctx.complete:
# calculate an output token from kafka token (or None if first iteration)
output_token = client_ctx.step(received_token)
# pass output token to kafka, or send empty response if the security
# context is complete (output token is None in that case)
if output_token is None:
self._send_bytes_blocking(Int32.encode(0))
else:
msg = output_token
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
# The server will send a token back. Processing of this token either
# establishes a security context, or it needs further token exchange.
# The gssapi will be able to identify the needed next step.
# The connection is closed on failure.
header = self._recv_bytes_blocking(4)
(token_size,) = struct.unpack('>i', header)
received_token = self._recv_bytes_blocking(token_size)
# Process the security layer negotiation token, sent by the server
# once the security context is established.
# unwraps message containing supported protection levels and msg size
msg = client_ctx.unwrap(received_token).message
# Kafka currently doesn't support integrity or confidentiality security layers, so we
# simply set QoP to 'auth' only (first octet). We reuse the max message size proposed
# by the server
msg = Int8.encode(SASL_QOP_AUTH & Int8.decode(io.BytesIO(msg[0:1]))) + msg[1:]
# add authorization identity to the response, GSS-wrap and send it
msg = client_ctx.wrap(msg + auth_id.encode(), False).message
size = Int32.encode(len(msg))
self._send_bytes_blocking(size + msg)
except (ConnectionError, TimeoutError) as e:
log.exception("%s: Error receiving reply from server", self)
err = Errors.KafkaConnectionError("%s: %s" % (self, e))
close = True
except Exception as e:
err = e
close = True
if err is not None:
if close:
self.close(error=err)
return future.failure(err)
log.info('%s: Authenticated as %s via GSSAPI', self, gssapi_name)
return future.success(True)
    def _try_authenticate_oauth(self, future):
        """Authenticate with SASL/OAUTHBEARER using the configured token
        provider. Resolves ``future`` with True on success or an error."""
        data = b''
        # Initial client response per RFC 7628, built from the token provider
        msg = bytes(self._build_oauth_client_request().encode("utf-8"))
        size = Int32.encode(len(msg))

        err = None
        close = False
        with self._lock:
            if not self._can_send_recv():
                # Not ready: fail the future but leave the connection alone
                err = Errors.NodeNotReadyError(str(self))
                close = False
            else:
                try:
                    # Send SASL OAuthBearer request with OAuth token
                    self._send_bytes_blocking(size + msg)

                    # The server will send a zero sized message (that is Int32(0)) on success.
                    # The connection is closed on failure
                    data = self._recv_bytes_blocking(4)

                except (ConnectionError, TimeoutError) as e:
                    log.exception("%s: Error receiving reply from server", self)
                    err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                    close = True

        if err is not None:
            if close:
                self.close(error=err)
            return future.failure(err)

        if data != b'\x00\x00\x00\x00':
            error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
            return future.failure(error)

        log.info('%s: Authenticated via OAuth', self)
        return future.success(True)
def _build_oauth_client_request(self):
token_provider = self.config['sasl_oauth_token_provider']
return "n,,\x01auth=Bearer {}{}\x01\x01".format(token_provider.token(), self._token_extensions())
def _token_extensions(self):
"""
Return a string representation of the OPTIONAL key-value pairs that can be sent with an OAUTHBEARER
initial request.
"""
token_provider = self.config['sasl_oauth_token_provider']
# Only run if the #extensions() method is implemented by the clients Token Provider class
# Builds up a string separated by \x01 via a dict of key value pairs
if callable(getattr(token_provider, "extensions", None)) and len(token_provider.extensions()) > 0:
msg = "\x01".join(["{}={}".format(k, v) for k, v in token_provider.extensions().items()])
return "\x01" + msg
else:
return ""
def blacked_out(self):
"""
Return true if we are disconnected from the given node and can't
re-establish a connection yet
"""
if self.state is ConnectionStates.DISCONNECTED:
if time.time() < self.last_attempt + self._reconnect_backoff:
return True
return False
def connection_delay(self):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting or connected, returns a very
large number to handle slow/stalled connections.
"""
time_waited = time.time() - (self.last_attempt or 0)
if self.state is ConnectionStates.DISCONNECTED:
return max(self._reconnect_backoff - time_waited, 0) * 1000
else:
# When connecting or connected, we should be able to delay
# indefinitely since other events (connection or data acked) will
# cause a wakeup once data can be sent.
return float('inf')
    def connected(self):
        """Return True iff socket is connected."""
        # CONNECTED only -- handshake/auth states are not "connected"
        return self.state is ConnectionStates.CONNECTED
    def connecting(self):
        """Returns True if still connecting (this may encompass several
        different states, such as SSL handshake, authorization, etc)."""
        return self.state in (ConnectionStates.CONNECTING,
                              ConnectionStates.HANDSHAKE,
                              ConnectionStates.AUTHENTICATING)
    def disconnected(self):
        """Return True iff socket is closed"""
        return self.state is ConnectionStates.DISCONNECTED
    def _reset_reconnect_backoff(self):
        # Called after a successful connect: clear the failure counter and
        # restore the backoff to its configured base value (in seconds)
        self._failures = 0
        self._reconnect_backoff = self.config['reconnect_backoff_ms'] / 1000.0
    def _update_reconnect_backoff(self):
        """Exponentially grow the reconnect backoff with +/-20% jitter,
        capped at reconnect_backoff_max_ms; stored in seconds."""
        # Do not mark as failure if there are more dns entries available to try
        if len(self._gai) > 0:
            return
        if self.config['reconnect_backoff_max_ms'] > self.config['reconnect_backoff_ms']:
            self._failures += 1
            # Exponential growth: base * 2^(failures - 1), capped at the max
            self._reconnect_backoff = self.config['reconnect_backoff_ms'] * 2 ** (self._failures - 1)
            self._reconnect_backoff = min(self._reconnect_backoff, self.config['reconnect_backoff_max_ms'])
            # Jitter spreads reconnect attempts from many clients over time
            self._reconnect_backoff *= uniform(0.8, 1.2)
            # Convert milliseconds to seconds for internal storage
            self._reconnect_backoff /= 1000.0
            log.debug('%s: reconnect backoff %s after %s failures', self, self._reconnect_backoff, self._failures)
    def _close_socket(self):
        # Idempotent socket teardown; the hasattr guard protects against
        # partially-constructed instances reached via __del__
        if hasattr(self, '_sock') and self._sock is not None:
            self._sock.close()
            self._sock = None
    def __del__(self):
        # Best-effort release of the OS socket at garbage collection
        self._close_socket()
    def close(self, error=None):
        """Close socket and fail all in-flight-requests.

        Arguments:
            error (Exception, optional): pending in-flight-requests
                will be failed with this exception.
                Default: kafka.errors.KafkaConnectionError.
        """
        # Cheap check before taking the lock; re-checked under the lock
        # because another thread may close concurrently
        if self.state is ConnectionStates.DISCONNECTED:
            return
        with self._lock:
            if self.state is ConnectionStates.DISCONNECTED:
                return
            log.info('%s: Closing connection. %s', self, error or '')
            self._update_reconnect_backoff()
            # Reset per-connection auth / protocol state for the next attempt
            self._sasl_auth_future = None
            self._protocol = KafkaProtocol(
                client_id=self.config['client_id'],
                api_version=self.config['api_version'])
            self._send_buffer = b''
            if error is None:
                error = Errors.Cancelled(str(self))
            # Snapshot in-flight futures so they can be failed after the
            # lock is released (callbacks may re-enter this object)
            ifrs = list(self.in_flight_requests.items())
            self.in_flight_requests.clear()
            self.state = ConnectionStates.DISCONNECTED
            # To avoid race conditions and/or deadlocks
            # keep a reference to the socket but leave it
            # open until after the state_change_callback
            # This should give clients a chance to deregister
            # the socket fd from selectors cleanly.
            sock = self._sock
            self._sock = None

        # drop lock before state change callback and processing futures
        self.config['state_change_callback'](self.node_id, sock, self)
        sock.close()
        for (_correlation_id, (future, _timestamp)) in ifrs:
            future.failure(error)
    def _can_send_recv(self):
        """Return True iff socket is ready for requests / responses"""
        # AUTHENTICATING counts as ready: SASL messages flow over the same
        # socket before the connection reaches CONNECTED
        return self.state in (ConnectionStates.AUTHENTICATING,
                              ConnectionStates.CONNECTED)
def send(self, request, blocking=True):
"""Queue request for async network send, return Future()"""
future = Future()
if self.connecting():
return future.failure(Errors.NodeNotReadyError(str(self)))
elif not self.connected():
return future.failure(Errors.KafkaConnectionError(str(self)))
elif not self.can_send_more():
return future.failure(Errors.TooManyInFlightRequests(str(self)))
return self._send(request, blocking=blocking)
    def _send(self, request, blocking=True):
        """Serialize ``request`` into the protocol send buffer; return a Future.

        Unlike send(), performs no connection-state validation beyond the
        ready check -- callers are expected to have done that.
        """
        future = Future()
        with self._lock:
            if not self._can_send_recv():
                # In this case, since we created the future above,
                # we know there are no callbacks/errbacks that could fire w/
                # lock. So failing + returning inline should be safe
                return future.failure(Errors.NodeNotReadyError(str(self)))

            correlation_id = self._protocol.send_request(request)

            log.debug('%s Request %d: %s', self, correlation_id, request)
            if request.expect_response():
                sent_time = time.time()
                assert correlation_id not in self.in_flight_requests, 'Correlation ID already in-flight!'
                # Track the future + send time so recv() can match the
                # response and compute latency
                self.in_flight_requests[correlation_id] = (future, sent_time)
            else:
                # Fire-and-forget request: resolve immediately
                future.success(None)

        # Attempt to replicate behavior from prior to introduction of
        # send_pending_requests() / async sends
        if blocking:
            self.send_pending_requests()

        return future
    def send_pending_requests(self):
        """Attempts to send pending requests messages via blocking IO
        If all requests have been sent, return True
        Otherwise, if the socket is blocked and there are more bytes to send,
        return False.
        """
        try:
            with self._lock:
                if not self._can_send_recv():
                    return False
                # Serialize all queued requests and push them with blocking IO
                data = self._protocol.send_bytes()
                total_bytes = self._send_bytes_blocking(data)

            # Record metrics outside the lock
            if self._sensors:
                self._sensors.bytes_sent.record(total_bytes)
            return True

        except (ConnectionError, TimeoutError) as e:
            # Connection-level failure: close and fail in-flight requests
            log.exception("Error sending request data to %s", self)
            error = Errors.KafkaConnectionError("%s: %s" % (self, e))
            self.close(error=error)
            return False
def send_pending_requests_v2(self):
"""Attempts to send pending requests messages via non-blocking IO
If all requests have been sent, return True
Otherwise, if the socket is blocked and there are more bytes to send,
return False.
"""
try:
with self._lock:
if not self._can_send_recv():
return False
# _protocol.send_bytes returns encoded requests to send
# we send them via _send_bytes()
# and hold leftover bytes in _send_buffer
if not self._send_buffer:
self._send_buffer = self._protocol.send_bytes()
total_bytes = 0
if self._send_buffer:
total_bytes = self._send_bytes(self._send_buffer)
self._send_buffer = self._send_buffer[total_bytes:]
if self._sensors:
self._sensors.bytes_sent.record(total_bytes)
# Return True iff send buffer is empty
return len(self._send_buffer) == 0
except (ConnectionError, TimeoutError, Exception) as e:
log.exception("Error sending request data to %s", self)
error = Errors.KafkaConnectionError("%s: %s" % (self, e))
self.close(error=error)
return False
def can_send_more(self):
"""Return True unless there are max_in_flight_requests_per_connection."""
max_ifrs = self.config['max_in_flight_requests_per_connection']
return len(self.in_flight_requests) < max_ifrs
    def recv(self):
        """Non-blocking network receive.

        Return list of (response, future) tuples
        """
        responses = self._recv()
        if not responses and self.requests_timed_out():
            # No data and the oldest in-flight request exceeded the timeout
            log.warning('%s timed out after %s ms. Closing connection.',
                        self, self.config['request_timeout_ms'])
            self.close(error=Errors.RequestTimedOutError(
                'Request timed out after %s ms' %
                self.config['request_timeout_ms']))
            return ()

        # augment responses w/ correlation_id, future, and timestamp
        for i, (correlation_id, response) in enumerate(responses):
            try:
                with self._lock:
                    (future, timestamp) = self.in_flight_requests.pop(correlation_id)
            except KeyError:
                # Response for a request we never sent -- protocol is out of
                # sync, so the only safe action is closing the connection
                self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
                return ()
            latency_ms = (time.time() - timestamp) * 1000
            if self._sensors:
                self._sensors.request_time.record(latency_ms)

            log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
            # Replace (correlation_id, response) with (response, future) in place
            responses[i] = (response, future)

        return responses
    def _recv(self):
        """Take all available bytes from socket, return list of any responses from parser"""
        recvd = []
        err = None
        with self._lock:
            if not self._can_send_recv():
                log.warning('%s cannot recv: socket not connected', self)
                return ()

            # Read at most sock_chunk_buffer_count chunks per call to bound
            # the time spent inside the lock
            while len(recvd) < self.config['sock_chunk_buffer_count']:
                try:
                    data = self._sock.recv(self.config['sock_chunk_bytes'])
                    # We expect socket.recv to raise an exception if there are no
                    # bytes available to read from the socket in non-blocking mode.
                    # but if the socket is disconnected, we will get empty data
                    # without an exception raised
                    if not data:
                        log.error('%s: socket disconnected', self)
                        err = Errors.KafkaConnectionError('socket disconnected')
                        break
                    else:
                        recvd.append(data)

                except (SSLWantReadError, SSLWantWriteError):
                    # No more data available right now
                    break
                except (ConnectionError, TimeoutError) as e:
                    if six.PY2 and e.errno == errno.EWOULDBLOCK:
                        break
                    log.exception('%s: Error receiving network data'
                                  ' closing socket', self)
                    err = Errors.KafkaConnectionError(e)
                    break
                except BlockingIOError:
                    if six.PY3:
                        break
                    # For PY2 this is a catchall and should be re-raised
                    raise

            # Only process bytes if there was no connection exception
            if err is None:
                recvd_data = b''.join(recvd)
                if self._sensors:
                    self._sensors.bytes_received.record(len(recvd_data))

                # We need to keep the lock through protocol receipt
                # so that we ensure that the processed byte order is the
                # same as the received byte order
                try:
                    return self._protocol.receive_bytes(recvd_data)
                except Errors.KafkaProtocolError as e:
                    err = e

        # Reached only on error; close() is called outside the lock
        self.close(error=err)
        return ()
def requests_timed_out(self):
with self._lock:
if self.in_flight_requests:
get_timestamp = lambda v: v[1]
oldest_at = min(map(get_timestamp,
self.in_flight_requests.values()))
timeout = self.config['request_timeout_ms'] / 1000.0
if time.time() >= oldest_at + timeout:
return True
return False
def _handle_api_version_response(self, response):
error_type = Errors.for_code(response.error_code)
assert error_type is Errors.NoError, "API version check failed"
self._api_versions = dict([
(api_key, (min_version, max_version))
for api_key, min_version, max_version in response.api_versions
])
return self._api_versions
def get_api_versions(self):
if self._api_versions is not None:
return self._api_versions
version = self.check_version()
if version < (0, 10, 0):
raise Errors.UnsupportedVersionError(
"ApiVersion not supported by cluster version {} < 0.10.0"
.format(version))
# _api_versions is set as a side effect of check_versions() on a cluster
# that supports 0.10.0 or later
return self._api_versions
def _infer_broker_version_from_api_versions(self, api_versions):
# The logic here is to check the list of supported request versions
# in reverse order. As soon as we find one that works, return it
test_cases = [
# format (<broker version>, <needed struct>)
((1, 0, 0), MetadataRequest[5]),
((0, 11, 0), MetadataRequest[4]),
((0, 10, 2), OffsetFetchRequest[2]),
((0, 10, 1), MetadataRequest[2]),
]
# Get the best match of test cases
for broker_version, struct in sorted(test_cases, reverse=True):
if struct.API_KEY not in api_versions:
continue
min_version, max_version = api_versions[struct.API_KEY]
if min_version <= struct.API_VERSION <= max_version:
return broker_version
# We know that ApiVersionResponse is only supported in 0.10+
# so if all else fails, choose that
return (0, 10, 0)
def check_version(self, timeout=2, strict=False, topics=[]):
"""Attempt to guess the broker version.
Note: This is a blocking call.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
"""
timeout_at = time.time() + timeout
log.info('Probing node %s broker version', self.node_id)
# Monkeypatch some connection configurations to avoid timeouts
override_config = {
'request_timeout_ms': timeout * 1000,
'max_in_flight_requests_per_connection': 5
}
stashed = {}
for key in override_config:
stashed[key] = self.config[key]
self.config[key] = override_config[key]
# kafka kills the connection when it doesn't recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
test_cases = [
# All cases starting from 0.10 will be based on ApiVersionResponse
((0, 10), ApiVersionRequest[0]()),
((0, 9), ListGroupsRequest[0]()),
((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
((0, 8, 0), MetadataRequest[0](topics)),
]
for version, request in test_cases:
if not self.connect_blocking(timeout_at - time.time()):
raise Errors.NodeNotReadyError()
f = self.send(request)
# HACK: sleeping to wait for socket to send bytes
time.sleep(0.1)
# when broker receives an unrecognized request API
# it abruptly closes our socket.
# so we attempt to send a second request immediately
# that we believe it will definitely recognize (metadata)
# the attempt to write to a disconnected socket should
# immediately fail and allow us to infer that the prior
# request was unrecognized
mr = self.send(MetadataRequest[0](topics))
selector = self.config['selector']()
selector.register(self._sock, selectors.EVENT_READ)
while not (f.is_done and mr.is_done):
selector.select(1)
for response, future in self.recv():
future.success(response)
selector.close()
if f.succeeded():
if isinstance(request, ApiVersionRequest[0]):
# Starting from 0.10 kafka broker we determine version
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
log.info('Broker version identifed as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
# Only enable strict checking to verify that we understand failure
# modes. For most users, the fact that the request failed should be
# enough to rule out a particular broker version.
if strict:
# If the socket flush hack did not work (which should force the
# connection to close and fail all pending requests), then we
# get a basic Request Timeout. This is not ideal, but we'll deal
if isinstance(f.exception, Errors.RequestTimedOutError):
pass
# 0.9 brokers do not close the socket on unrecognized api
# requests (bug...). In this case we expect to see a correlation
# id mismatch
elif (isinstance(f.exception, Errors.CorrelationIdError) and
version == (0, 10)):
pass
elif six.PY2:
assert isinstance(f.exception.args[0], socket.error)
assert f.exception.args[0].errno in (32, 54, 104)
else:
assert isinstance(f.exception.args[0], ConnectionError)
log.info("Broker is not v%s -- it did not recognize %s",
version, request.__class__.__name__)
else:
raise Errors.UnrecognizedBrokerVersion()
for key in stashed:
self.config[key] = stashed[key]
return version
    def __str__(self):
        # Human-readable summary: node id, endpoint, connection state, and
        # the resolved address family / sockaddr currently in use
        return "<BrokerConnection node_id=%s host=%s:%d %s [%s %s]>" % (
            self.node_id, self.host, self.port, self.state,
            AFI_NAMES[self._sock_afi], self._sock_addr)
class BrokerConnectionMetrics(object):
    """Registers and exposes the network sensors for one broker connection.

    Cluster-wide aggregate sensors ('bytes-sent-received', 'bytes-sent',
    'bytes-received', 'request-latency') are created once and used as
    parents of the per-node sensors, so recording on a node sensor also
    updates the aggregate totals.
    """
    def __init__(self, metrics, metric_group_prefix, node_id):
        self.metrics = metrics
        # Any broker may have registered summary metrics already
        # but if not, we need to create them so we can set as parents below
        all_conns_transferred = metrics.get_sensor('bytes-sent-received')
        if not all_conns_transferred:
            metric_group_name = metric_group_prefix + '-metrics'

            # Aggregate read+write operation rate over all connections
            bytes_transferred = metrics.sensor('bytes-sent-received')
            bytes_transferred.add(metrics.metric_name(
                'network-io-rate', metric_group_name,
                'The average number of network operations (reads or writes) on all'
                ' connections per second.'), Rate(sampled_stat=Count()))

            # Aggregate outgoing byte / request-rate / request-size stats
            bytes_sent = metrics.sensor('bytes-sent',
                                        parents=[bytes_transferred])
            bytes_sent.add(metrics.metric_name(
                'outgoing-byte-rate', metric_group_name,
                'The average number of outgoing bytes sent per second to all'
                ' servers.'), Rate())
            bytes_sent.add(metrics.metric_name(
                'request-rate', metric_group_name,
                'The average number of requests sent per second.'),
                Rate(sampled_stat=Count()))
            bytes_sent.add(metrics.metric_name(
                'request-size-avg', metric_group_name,
                'The average size of all requests in the window.'), Avg())
            bytes_sent.add(metrics.metric_name(
                'request-size-max', metric_group_name,
                'The maximum size of any request sent in the window.'), Max())

            # Aggregate incoming byte / response-rate stats
            bytes_received = metrics.sensor('bytes-received',
                                            parents=[bytes_transferred])
            bytes_received.add(metrics.metric_name(
                'incoming-byte-rate', metric_group_name,
                'Bytes/second read off all sockets'), Rate())
            bytes_received.add(metrics.metric_name(
                'response-rate', metric_group_name,
                'Responses received sent per second.'),
                Rate(sampled_stat=Count()))

            # Aggregate request latency stats
            request_latency = metrics.sensor('request-latency')
            request_latency.add(metrics.metric_name(
                'request-latency-avg', metric_group_name,
                'The average request latency in ms.'),
                Avg())
            request_latency.add(metrics.metric_name(
                'request-latency-max', metric_group_name,
                'The maximum request latency in ms.'),
                Max())

        # if one sensor of the metrics has been registered for the connection,
        # then all other sensors should have been registered; and vice versa
        node_str = 'node-{0}'.format(node_id)
        node_sensor = metrics.get_sensor(node_str + '.bytes-sent')
        if not node_sensor:
            metric_group_name = metric_group_prefix + '-node-metrics.' + node_str

            # Per-node outgoing stats, parented to the aggregate sensor
            bytes_sent = metrics.sensor(
                node_str + '.bytes-sent',
                parents=[metrics.get_sensor('bytes-sent')])
            bytes_sent.add(metrics.metric_name(
                'outgoing-byte-rate', metric_group_name,
                'The average number of outgoing bytes sent per second.'),
                Rate())
            bytes_sent.add(metrics.metric_name(
                'request-rate', metric_group_name,
                'The average number of requests sent per second.'),
                Rate(sampled_stat=Count()))
            bytes_sent.add(metrics.metric_name(
                'request-size-avg', metric_group_name,
                'The average size of all requests in the window.'),
                Avg())
            bytes_sent.add(metrics.metric_name(
                'request-size-max', metric_group_name,
                'The maximum size of any request sent in the window.'),
                Max())

            # Per-node incoming stats, parented to the aggregate sensor
            bytes_received = metrics.sensor(
                node_str + '.bytes-received',
                parents=[metrics.get_sensor('bytes-received')])
            bytes_received.add(metrics.metric_name(
                'incoming-byte-rate', metric_group_name,
                'Bytes/second read off node-connection socket'),
                Rate())
            bytes_received.add(metrics.metric_name(
                'response-rate', metric_group_name,
                'The average number of responses received per second.'),
                Rate(sampled_stat=Count()))

            # Per-node latency stats, parented to the aggregate sensor
            request_time = metrics.sensor(
                node_str + '.latency',
                parents=[metrics.get_sensor('request-latency')])
            request_time.add(metrics.metric_name(
                'request-latency-avg', metric_group_name,
                'The average request latency in ms.'),
                Avg())
            request_time.add(metrics.metric_name(
                'request-latency-max', metric_group_name,
                'The maximum request latency in ms.'),
                Max())

        # Handles used by BrokerConnection to record per-node measurements
        self.bytes_sent = metrics.sensor(node_str + '.bytes-sent')
        self.bytes_received = metrics.sensor(node_str + '.bytes-received')
        self.request_time = metrics.sensor(node_str + '.latency')
def _address_family(address):
    """
    Attempt to determine the family of an address (or hostname)

    :return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
        could not be determined
    """
    # Bracketed literals are IPv6 by convention
    if address.startswith('[') and address.endswith(']'):
        return socket.AF_INET6
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, address)
        except (ValueError, AttributeError, socket.error):
            continue
        return family
    return socket.AF_UNSPEC
def get_ip_port_afi(host_and_port_str):
    """
    Parse the IP and port from a string in the format of:

        * host_or_ip        <- Can be either IPv4 address literal or hostname/fqdn
        * host_or_ipv4:port <- Can be either IPv4 address literal or hostname/fqdn
        * [host_or_ip]      <- IPv6 address literal
        * [host_or_ip]:port <- IPv6 address literal

    .. note:: IPv6 address literals with ports *must* be enclosed in brackets

    .. note:: If the port is not specified, default will be returned.

    :return: tuple (host, port, afi), afi will be socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC
    """
    host_and_port_str = host_and_port_str.strip()

    if host_and_port_str.startswith('['):
        # Bracketed IPv6 literal, optionally followed by ':port'
        host, rest = host_and_port_str[1:].split(']')
        port = int(rest[1:]) if rest else DEFAULT_KAFKA_PORT
        return host, port, socket.AF_INET6

    if ':' not in host_and_port_str:
        # Bare hostname or IP literal with no port
        return host_and_port_str, DEFAULT_KAFKA_PORT, _address_family(host_and_port_str)

    # now we have something with a colon in it and no square brackets. It could be
    # either an IPv6 address literal (e.g., "::1") or an IP:port pair or a host:port pair
    try:
        # if it decodes as an IPv6 address, use that
        socket.inet_pton(socket.AF_INET6, host_and_port_str)
    except AttributeError:
        log.warning('socket.inet_pton not available on this platform.'
                    ' consider `pip install win_inet_pton`')
    except (ValueError, socket.error):
        # it's a host:port pair
        pass
    else:
        return host_and_port_str, DEFAULT_KAFKA_PORT, socket.AF_INET6

    host, port = host_and_port_str.rsplit(':', 1)
    return host, int(port), _address_family(host)
def collect_hosts(hosts, randomize=True):
    """
    Collects a comma-separated set of hosts (host:port) and optionally
    randomize the returned list.
    """
    # Accept either a single comma-separated string or an iterable
    if isinstance(hosts, six.string_types):
        hosts = hosts.strip().split(',')

    result = []
    for host_port in hosts:
        host, port, afi = get_ip_port_afi(host_port)
        # Defensive: normalize any negative port to the default
        if port < 0:
            port = DEFAULT_KAFKA_PORT
        result.append((host, port, afi))

    if randomize:
        shuffle(result)

    return result
def is_inet_4_or_6(gai):
    """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
    family = gai[0]
    return family == socket.AF_INET or family == socket.AF_INET6
def dns_lookup(host, port, afi=socket.AF_UNSPEC):
    """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
    # XXX: all DNS functions in Python are blocking. If we really
    # want to be non-blocking here, we need to use a 3rd-party
    # library like python-adns, or move resolution onto its
    # own thread. This will be subject to the default libc
    # name resolution timeout (5s on most Linux boxes)
    try:
        results = socket.getaddrinfo(host, port, afi, socket.SOCK_STREAM)
    except socket.gaierror as ex:
        log.warning('DNS lookup failed for %s:%d,'
                    ' exception was %s. Is your'
                    ' advertised.listeners (called'
                    ' advertised.host.name before Kafka 9)'
                    ' correct and resolvable?',
                    host, port, ex)
        return []
    return [gai for gai in results if is_inet_4_or_6(gai)]
| apache-2.0 |
sacsant/avocado-misc-tests | io/disk/Avago_storage_adapter/avago3008.py | 4 | 8849 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
#
# Copyright: 2016 IBM
# Author: Venkat Rao B <vrbagal1@linux.vnet.ibm.com>
"""
This script will list all the adapter connected to the system.
"""
import os
import time
from avocado import Test
from avocado.utils import process
class Avago3008(Test):
    """
    Exercises RAID operations (create/delete/online-offline/rebuild/
    consistency check) on an LSI/Avago 3008 adapter via a vendor CLI tool.
    All adapter interaction is done by shelling out to `tool_location`.
    """
    def setUp(self):
        """
        Read test parameters and validate the drive list and CLI tool
        before any test runs.
        """
        self.controller = int(self.params.get('controller', default='0'))
        # NOTE(review): default is '0' but dict_raid below only has keys
        # like 'raid0' -- an omitted 'raidlevel' param raises KeyError at
        # the dict lookup; confirm the intended default.
        self.raidlevel = str(self.params.get('raidlevel', default='0'))
        # disk_bay is a space-separated list of drive identifiers
        self.disk = str(self.params.get('disk_bay')).split(" ")
        self.spare = str(self.params.get('spare'))
        # NOTE(review): int('max') raises ValueError -- if the 'size' param
        # is omitted this line fails in setUp; confirm the intended default.
        self.size = int(self.params.get('size', default='max'))
        self.tool_location = self.params.get('tool_location')
        self.clear_config = self.params.get('clear_config', default=False)
        self.setup_raid = self.params.get('setup_raid', default=False)
        self.cleanup_raid = self.params.get('cleanup_raid', default=False)
        if not self.disk:
            self.cancel("Please provide disks to run the tests")
        self.number_of_disk = len(self.disk)
        # the CLI tool must exist and be executable
        if (not os.path.isfile(self.tool_location) or not
                os.access(self.tool_location, os.X_OK)):
            # NOTE(review): cancel() is called without a reason message here
            self.cancel()
        # raid level -> [min disks required, max disks used (None = all),
        #               disk-count constraint ('Even' or None)]
        self.dict_raid = {'raid0': [2, None, None], 'raid1': [2, 2, None],
                          'raid1e': [3, None, None],
                          'raid10': [4, None, 'Even']}
        self.value = self.dict_raid[self.raidlevel]
        if self.number_of_disk < self.value[0]:
            self.cancel("please give enough drives to perform the test")
        # trim the drive list to the level's maximum, if one is defined
        if self.value[1] is not None:
            self.disk = self.disk[0:self.value[1]]
        # raid10 needs an even drive count: drop the last drive if odd
        if self.value[2] == 'Even':
            if self.number_of_disk % 2 != 0:
                self.disk = self.disk[:-1]
        # space-separated drive list as passed on the CLI
        self.raid_disk = " ".join(self.disk).strip(" ")
    def test_run(self):
        """
        Dispatch to the scenario selected by the clear/setup/cleanup
        parameters, falling back to basic (raid0) or extensive tests.
        """
        if self.clear_config:
            self.clear_configuration()
        if self.setup_raid:
            self.createraid()
        elif self.cleanup_raid:
            self.deleteraid()
        elif self.raidlevel == 'raid0':
            self.basictest()
        else:
            self.extensivetest()
    def basictest(self):
        """
        Minimal lifecycle: create a RAID volume, check status, delete it,
        and collect the IR logs.
        """
        self.adapterdetails()
        self.createraid()
        self.adapter_status("Volume state")
        self.adapterdetails()
        self.deleteraid()
        self.adapter_status("Volume state")
        self.logir()
    def extensivetest(self):
        """
        Full lifecycle: create a volume, toggle a drive online/offline
        repeatedly, optionally set a hot spare, rebuild, run a consistency
        check, then delete the volume and collect logs.
        """
        self.adapterlist()
        self.adapterdetails()
        self.createraid()
        self.backgroundinit()
        self.adapterdetails()
        self.adapter_status("Volume state")
        self.set_online_offline("offline")
        self.set_online_offline("online")
        # stress the state transitions a few times
        for _ in range(0, 5):
            for state in ['offline', 'online']:
                self.set_online_offline(state)
                time.sleep(10)
        if self.spare and self.clear_config:
            self.hotspare()
        self.rebuild()
        self.consistcheck()
        self.deleteraid()
        self.logir()
        self.adapterdetails()
        self.adapter_status("Volume state")
    def clear_configuration(self):
        """
        Delete the existing RAID configuration on the controller.
        """
        cmd = "%s %d delete noprompt" % (self.tool_location, self.controller)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Unable to clear entire configuration before starting")
    def adapterlist(self):
        """
        List all the Avago/LSI adapters attached to the machine.
        """
        cmd = "%s list" % self.tool_location
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to list all the Avogo adapters")
    def adapterdetails(self):
        """
        Display controller, volume and physical device info.
        """
        cmd = "%s %d display" % (self.tool_location, self.controller)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to display details of drives and VR vloumes")
    def createraid(self):
        """
        Create a RAID volume named vr1 at the configured level/size over
        the selected drives.
        """
        cmd = "%s %d create %s %s %s vr1 noprompt" \
            % (self.tool_location, self.controller, self.raidlevel,
               self.size, self.raid_disk)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to create raid on the drives")
    def hotspare(self):
        """
        Configure the spare drive as a hot spare, feeding the tool's
        confirmation prompts via echo.
        """
        cmd = "echo -e 'YES\nNO' | %s %d hotspare %s" \
            % (self.tool_location, self.controller, self.spare)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to set hotspare drive")
    def backgroundinit(self):
        """
        Wait for any in-progress background initialization to complete.
        """
        self.sleepfunction()
    def consistcheck(self):
        """
        Start a consistency check on the volume and wait for it to finish.
        """
        cmd = "%s %d constchk %d noprompt" \
            % (self.tool_location, self.controller, self.volumeid())
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to start CC on raid array VR1")
        self.sleepfunction()
    def logir(self):
        """
        Upload the controller IR logs, then clear them.
        """
        cmd = "%s %d logir upload" % (self.tool_location, self.controller)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to upload the logs")
        cmd = "%s %d logir clear noprompt" % (self.tool_location,
                                              self.controller)
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to clear the logs on controller")
    def rebuild(self):
        """
        Force a rebuild (by offlining a drive) and poll until the volume
        returns to the 'Optimal' state.
        """
        self.set_online_offline("offline")
        while self.adapter_status("Volume state").strip("\n") != 'Optimal':
            time.sleep(30)
    def set_online_offline(self, state):
        """
        Set the first selected drive online or offline ('online'/'offline').
        """
        cmd = "%s %d set%s %s" \
            % (self.tool_location, self.controller, state, self.disk[0])
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to set drive to %s" % state)
    def adapter_status(self, var):
        """
        Return the value of the status field named `var` from the
        controller status output, or None if the field is absent.
        """
        cmd = "%s %d status" % (self.tool_location, self.controller)
        output = process.run(cmd, shell=True, ignore_status=True)
        if output.exit_status != 0:
            self.fail("Failed to display the status of the adapter")
        for i in output.stdout.splitlines():
            if var in i:
                # field format is "Name : value"; take the value part
                return i.split(":")[-1].strip(" ").strip("\n")
    def deleteraid(self):
        """
        Delete the vr1 RAID volume by its volume ID.
        """
        cmd = "%s %d deletevolume %d noprompt" % (
            self.tool_location, self.controller, self.volumeid())
        if process.system(cmd, ignore_status=True, shell=True) != 0:
            self.fail("Failed to delete raid array VR1")
    def volumeid(self):
        """
        Return the volume ID of the vr1 IR volume, parsed from the
        display output via grep/awk.
        """
        cmd = "%s %d display | grep 'vr1' -B 2 | grep 'Volume ID' | \
            awk '{print $4}'" % (self.tool_location, self.controller)
        volume_id = int(process.system_output(cmd, shell=True))
        return int(volume_id)
    def sleepfunction(self):
        """
        Poll adapter status until 'Current operation' reports 'None'.
        """
        while self.adapter_status("Current operation") != 'None':
            time.sleep(10)
| gpl-2.0 |
mudbungie/NetExplorer | env/lib/python3.4/site-packages/bulbs/tests/element_tests.py | 3 | 5050 | # -*- coding: utf-8 -*-
#
# Copyright 2011 James Thornton (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
import time
import unittest
from bulbs import config
from bulbs.element import Vertex, VertexProxy, EdgeProxy, Edge
from .testcase import BulbsTestCase
class VertexProxyTestCase(BulbsTestCase):
    """CRUD tests for VertexProxy against the backend client supplied by
    BulbsTestCase (requires a running graph server)."""
    def setUp(self):
        self.vertices = VertexProxy(Vertex,self.client)
    def test_create(self):
        # create returns a Vertex element with type and properties set
        james = self.vertices.create({'name':'James'})
        assert isinstance(james,Vertex)
        #assert type(james._id) == int
        assert james._type == "vertex"
        assert james.name == "James"
    def test_update_and_get(self):
        # update by id, then fetch again and verify the new properties
        james1 = self.vertices.create({'name':'James'})
        self.vertices.update(james1._id, {'name':'James','age':34})
        james2 = self.vertices.get(james1._id)
        assert james2._id == james1._id
        assert james2.name == "James"
        assert james2.age == 34
    #def test_get_all(self):
    #    vertices = self.vertices.get_all()
    #    vertices = list(vertices)
    #    assert len(vertices) > 0
    #def test_remove_property(self):
    #    query_time = self.vertices.remove(self.james._id,'age')
    #    assert type(query_time) == float
    #    assert self.james.age is None
    def test_delete_vertex(self):
        # a deleted vertex can no longer be fetched
        james = self.vertices.create({'name':'James'})
        resp = self.vertices.delete(james._id)
        j2 = self.vertices.get(james._id)
        # NOTE(review): `is None` would be the idiomatic comparison here
        assert j2 == None
    def test_ascii_encoding(self):
        # regression test for non-ASCII property values, see
        # http://stackoverflow.com/questions/19824952/unicodeencodeerror-bulbs-and-neo4j-create-model
        data = {u'name': u'Aname M\xf6ller'}
        v1a = self.vertices.create(data)
        v1b = self.vertices.get(v1a._id)
        assert v1b.name == data['name']
class VertexTestCase(BulbsTestCase):
    """Edge-traversal tests on Vertex elements: two vertices connected by
    one "test" edge in each direction."""
    def setUp(self):
        self.vertices = VertexProxy(Vertex,self.client)
        self.edges = EdgeProxy(Edge,self.client)
        self.james = self.vertices.create({'name':'James'})
        self.julie = self.vertices.create({'name':'Julie'})
        # one edge in each direction between the two vertices
        self.edges.create(self.james,"test",self.julie)
        self.edges.create(self.julie,"test",self.james)
    def test_init(self):
        #assert type(self.james._id) == int
        assert isinstance(self.james,Vertex)
        assert self.james._type == "vertex"
        assert self.james.name == "James"
        assert self.julie._type == "vertex"
        assert self.julie.name == "Julie"
    def test_get_out_edges(self):
        # exactly one outgoing edge (james -> julie)
        edges = self.james.outE()
        edges = list(edges)
        assert len(edges) == 1
    def test_get_in_edges(self):
        # exactly one incoming edge (julie -> james)
        edges = self.james.inE()
        edges = list(edges)
        assert len(edges) == 1
    def test_get_both_edges(self):
        # both directions combined: two edges
        edges = self.james.bothE()
        edges = list(edges)
        assert len(edges) == 2
    def test_get_both_labeled_edges(self):
        # filtering by label "test" matches both edges
        edges = self.james.bothE("test")
        edges = list(edges)
        assert len(edges) == 2
class EdgeProxyTestCase(BulbsTestCase):
    """CRUD tests for EdgeProxy between two freshly created vertices."""
    def setUp(self):
        self.vertices = VertexProxy(Vertex,self.client)
        self.edges = EdgeProxy(Edge,self.client)
        self.james = self.vertices.create({'name':'James'})
        self.julie = self.vertices.create({'name':'Julie'})
    def test_create(self):
        # created edge records tail (_outV), label, and head (_inV)
        data = dict(timestamp=int(time.time()))
        edge = self.edges.create(self.james, "test", self.julie, data)
        assert edge._outV == self.james._id
        assert edge._label == "test"
        assert edge._inV == self.julie._id
    def test_update_and_get(self):
        # update an edge property by id and verify the persisted value
        now = int(time.time())
        e1 = self.edges.create(self.james,"test",self.julie, {'timestamp': now})
        assert e1.timestamp == now
        later = int(time.time())
        self.edges.update(e1._id, {'timestamp': later})
        e2 = self.edges.get(e1._id)
        assert e1._id == e2._id
        assert e1._inV == e2._inV
        assert e1._label == e2._label
        assert e1._outV == e2._outV
        assert e2.timestamp == later
    #def test_get_all(self):
    #    edges = self.edges.get_all()
    #    edges = list(edges)
    #    assert type(edges) == list
    #def test_remove_property(self):
    #    e1 = self.edges.create(self.james,"test",self.julie,{'time':'today'})
    #    query_time = self.edges.remove(e1._id,{'time'})
    #    assert type(query_time) == float
    #    assert e1.time is None
    def test_delete_edge(self):
        # a deleted edge can no longer be fetched
        e1 = self.edges.create(self.james,"test",self.julie)
        resp = self.edges.delete(e1._id)
        e2 = self.edges.get(e1._id)
        # NOTE(review): `is None` would be the idiomatic comparison here
        assert e2 == None
def suite():
    """Build the test suite covering all element test cases.

    Uses ``unittest.defaultTestLoader.loadTestsFromTestCase`` instead of
    ``unittest.makeSuite``, which has been deprecated for a long time and
    was removed in Python 3.13.
    """
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(VertexProxyTestCase))
    suite.addTest(loader.loadTestsFromTestCase(VertexTestCase))
    suite.addTest(loader.loadTestsFromTestCase(EdgeProxyTestCase))
    # NOTE: there are no tests for the Edge because it doesn't have methods.
    return suite
# Allow running this module directly: execute the suite() defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| mit |
nash-x/hws | nova/cmd/xvpvncproxy.py | 39 | 1133 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XVP VNC Console Proxy Server."""
import sys
from nova import config
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
from nova.vnc import xvp_proxy
def main():
    """Entry point for the XVP VNC console proxy service.

    Parses config/CLI options, sets up logging and error reporting, then
    serves the proxy's WSGI server until shutdown.
    """
    config.parse_args(sys.argv)
    logging.setup("nova")
    # enable Guru Meditation error-report generation for this process
    gmr.TextGuruMeditation.setup_autorun(version)
    # build the WSGI proxy server, start it, and block until it stops
    wsgi_server = xvp_proxy.get_wsgi_server()
    service.serve(wsgi_server)
    service.wait()
| apache-2.0 |
jstoxrocky/statsmodels | statsmodels/sandbox/regression/gmm.py | 8 | 60908 | '''Generalized Method of Moments, GMM, and Two-Stage Least Squares for
instrumental variables IV2SLS
Issues
------
* number of parameters, nparams, and starting values for parameters
Where to put them? start was initially taken from global scope (bug)
* When optimal weighting matrix cannot be calculated numerically
In DistQuantilesGMM, we only have one row of moment conditions, not a
moment condition for each observation, calculation for cov of moments
breaks down. iter=1 works (weights is identity matrix)
-> need method to do one iteration with an identity matrix or an
analytical weighting matrix given as parameter.
-> add result statistics for this case, e.g. cov_params, I have it in the
standalone function (and in calc_covparams which is a copy of it),
but not tested yet.
DONE `fitonce` in DistQuantilesGMM, params are the same as in direct call to fitgmm
move it to GMM class (once it's clearer for which cases I need this.)
* GMM doesn't know anything about the underlying model, e.g. y = X beta + u or panel
data model. It would be good if we can reuse methods from regressions, e.g.
predict, fitted values, calculating the error term, and some result statistics.
What's the best way to do this, multiple inheritance, outsourcing the functions,
mixins or delegation (a model creates a GMM instance just for estimation).
Unclear
-------
* dof in Hausman
- based on rank
- differs between IV2SLS method and function used with GMM or (IV2SLS)
- with GMM, covariance matrix difference has negative eigenvalues in iv example, ???
* jtest/jval
- I'm not sure about the normalization (multiply or divide by nobs) in jtest.
need a test case. Scaling of jval is irrelevant for estimation.
jval in jtest looks to large in example, but I have no idea about the size
* bse for fitonce look too large (no time for checking now)
formula for calc_cov_params for the case without optimal weighting matrix
is wrong. I don't have an estimate for omega in that case. And I'm confusing
between weights and omega, which are *not* the same in this case.
Author: josef-pktd
License: BSD (3-clause)
'''
from __future__ import print_function
from statsmodels.compat.python import lrange
import numpy as np
from scipy import optimize, stats
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.base.model import (Model,
LikelihoodModel, LikelihoodModelResults)
from statsmodels.regression.linear_model import (OLS, RegressionResults,
RegressionResultsWrapper)
import statsmodels.stats.sandwich_covariance as smcov
from statsmodels.tools.decorators import (resettable_cache, cache_readonly)
from statsmodels.compat.numpy import np_matrix_rank
DEBUG = 0
def maxabs(x):
    '''shortcut for the largest absolute value of x
    '''
    return np.max(np.abs(x))
class IV2SLS(LikelihoodModel):
    '''
    Class for instrumental variables estimation using Two-Stage Least-Squares

    Parameters
    ----------
    endog: array 1d
       endogenous variable
    exog : array
       explanatory variables
    instruments : array
       instruments for explanatory variables, needs to contain those exog
       variables that are not instrumented out

    Notes
    -----
    All variables in exog are instrumented in the calculations. If variables
    in exog are not supposed to be instrumented out, then these variables
    need also to be included in the instrument array.

    Degrees of freedom in the calculation of the standard errors uses
    `df_resid = (nobs - k_vars)`.
    (This corresponds to the `small` option in Stata's ivreg2.)
    '''

    def __init__(self, endog, exog, instrument=None):
        self.instrument = instrument
        super(IV2SLS, self).__init__(endog, exog)
        # Note: Greene 5th ed. p.77/78: the dof correction is not necessary
        # (because only asymptotic results are used), but most packages do
        # it anyway.
        self.df_resid = self.exog.shape[0] - self.exog.shape[1]
        #self.df_model = float(self.rank - self.k_constant)
        self.df_model = float(self.exog.shape[1] - self.k_constant)

    def initialize(self):
        # no whitening: the "weighted" data is just the raw data
        self.wendog = self.endog
        self.wexog = self.exog

    def whiten(self, X):
        # 2SLS applies no transform; kept to satisfy the model interface
        pass

    def fit(self):
        '''estimate model using 2SLS IV regression

        Returns
        -------
        results : instance of RegressionResults
           regression result

        Notes
        -----
        This returns a generic RegressionResults instance as defined for the
        linear models.

        Parameter estimates and covariance are correct, but other results
        have not been tested yet, to see whether they apply without changes.

        '''
        #Greene 5th edt., p.78 section 5.4
        #move this maybe
        y, x, z = self.endog, self.exog, self.instrument
        # TODO: this uses "textbook" calculation, improve linalg
        # first stage: regress exog on instruments to get fitted values xhat
        ztz = np.dot(z.T, z)
        ztx = np.dot(z.T, x)
        self.xhatparams = xhatparams = np.linalg.solve(ztz, ztx)
        F = xhat = np.dot(z, xhatparams)
        FtF = np.dot(F.T, F)
        self.xhatprod = FtF  # store for Hausman specification test
        # second stage: regress endog on the fitted values
        Ftx = np.dot(F.T, x)
        Fty = np.dot(F.T, y)
        params = np.linalg.solve(FtF, Fty)
        Ftxinv = np.linalg.inv(Ftx)
        self.normalized_cov_params = np.dot(Ftxinv.T, np.dot(FtF, Ftxinv))

        lfit = IVRegressionResults(self, params,
                                   normalized_cov_params=self.normalized_cov_params)

        lfit.exog_hat_params = xhatparams
        lfit.exog_hat = xhat  # TODO: do we want to store this, might be large
        self._results = lfit  # TODO : remove this
        self._results_ols2nd = OLS(y, xhat).fit()

        return RegressionResultsWrapper(lfit)

    # copied from GLS, because this class subclasses LikelihoodModel, not GLS
    def predict(self, params, exog=None):
        """
        Return linear predicted values from a design matrix.

        Parameters
        ----------
        params : array-like
            Parameters of a linear model
        exog : array-like, optional
            Design / exogenous data; defaults to the model's exog.

        Returns
        -------
        An array of fitted values
        """
        # Note: an unreachable legacy code path after this return (falling
        # back to self._results.params) has been removed; params is required.
        if exog is None:
            exog = self.exog
        return np.dot(exog, params)
class IVRegressionResults(RegressionResults):
    """
    Results class for an IV2SLS model.

    Most of the methods and attributes are inherited from RegressionResults.
    The methods defined here are:

    - fvalue
    - spec_hausman
    - summary

    See Also
    --------
    RegressionResults
    """

    @cache_readonly
    def fvalue(self):
        # F-test for joint significance of all slopes (constant excluded)
        k_vars = len(self.params)
        restriction = np.eye(k_vars)
        idx_noconstant = lrange(k_vars)
        # drop the row restricting the constant term
        del idx_noconstant[self.model.data.const_idx]
        fval = self.f_test(restriction[idx_noconstant]).fvalue # without constant
        return fval


    def spec_hausman(self, dof=None):
        '''Hausman's specification test

        Compares the IV estimates against plain OLS on the same data.

        Parameters
        ----------
        dof : int or None
            degrees of freedom for the chi-square test; if falsy, the rank
            of the covariance difference is used.

        See Also
        --------
        spec_hausman : generic function for Hausman's specification test

        '''
        #use normalized cov_params for OLS

        endog, exog = self.model.endog, self.model.exog
        resols = OLS(endog, exog).fit()
        normalized_cov_params_ols = resols.model.normalized_cov_params
        # Stata `ivendog` doesn't use df correction for se
        #se2 = resols.mse_resid #* resols.df_resid * 1. / len(endog)
        se2 = resols.ssr / len(endog)

        params_diff = self.params - resols.params

        cov_diff = np.linalg.pinv(self.model.xhatprod) - normalized_cov_params_ols
        #TODO: the following is very inefficient, solves problem (svd) twice
        #use linalg.lstsq or svd directly
        #cov_diff will very often be in-definite (singular)
        if not dof:
            dof = np_matrix_rank(cov_diff)
        cov_diffpinv = np.linalg.pinv(cov_diff)
        H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))/se2
        pval = stats.chi2.sf(H, dof)

        return H, pval, dof


    # copied from regression results with small changes, no llf
    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results

        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results

        """

        #TODO: import where we need it (for now), add as cached attributes
        from statsmodels.stats.stattools import (jarque_bera,
                omni_normtest, durbin_watson)
        jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
        omni, omnipv = omni_normtest(self.wresid)

        #TODO: reuse condno from somewhere else ?
        #condno = np.linalg.cond(np.dot(self.wexog.T, self.wexog))
        wexog = self.model.wexog
        eigvals = np.linalg.linalg.eigvalsh(np.dot(wexog.T, wexog))
        eigvals = np.sort(eigvals) #in increasing order
        condno = np.sqrt(eigvals[-1]/eigvals[0])

        # TODO: check what is valid.
        # box-pierce, breush-pagan, durbin's h are not with endogenous on rhs
        # use Cumby Huizinga 1992 instead
        self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
                          omni=omni, omnipv=omnipv, condno=condno,
                          mineigval=eigvals[0])

        #TODO not used yet
        #diagn_left_header = ['Models stats']
        #diagn_right_header = ['Residual stats']

        #TODO: requiring list/iterable is a bit annoying
        #need more control over formatting
        #TODO: default don't work if it's not identically spelled

        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Two Stage']),
                    ('', ['Least Squares']),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Observations:', None),
                    ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
                    ('Df Model:', None), #[self.df_model])
                    ]

        top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
                     ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
                     ('F-statistic:', ["%#8.4g" % self.fvalue] ),
                     ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                     #('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
                     #('AIC:', ["%#8.4g" % self.aic]),
                     #('BIC:', ["%#8.4g" % self.bic])
                     ]

        diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
                      ('Prob(Omnibus):', ["%#6.3f" % omnipv]),
                      ('Skew:', ["%#6.3f" % skew]),
                      ('Kurtosis:', ["%#6.3f" % kurtosis])
                      ]

        diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
                       ('Jarque-Bera (JB):', ["%#8.3f" % jb]),
                       ('Prob(JB):', ["%#8.3g" % jbpv]),
                       ('Cond. No.', ["%#8.3g" % condno])
                       ]

        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Regression Results"

        #create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                          yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                             use_t=True)

        smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
                          yname=yname, xname=xname,
                          title="")

        return smry
############# classes for Generalized Method of Moments GMM
_gmm_options = '''\
Options for GMM
---------------
Type of GMM
~~~~~~~~~~~
- one-step
- iterated
- CUE : not tested yet
weight matrix
~~~~~~~~~~~~~
- `weights_method` : string, defines method for robust
Options here are similar to :mod:`statsmodels.stats.robust_covariance`
default is heteroscedasticity consistent, HC0
currently available methods are
- `cov` : HC0, optionally with degrees of freedom correction
- `hac` :
- `iid` : untested, only for Z*u case, IV cases with u as error indep of Z
- `ac` : not available yet
- `cluster` : not connected yet
- others from robust_covariance
other arguments:
- `wargs` : tuple or dict, required arguments for weights_method
- `centered` : bool,
indicates whether moments are centered for the calculation of the weights
and covariance matrix, applies to all weight_methods
- `ddof` : int
degrees of freedom correction, applies currently only to `cov`
- maxlag : int
number of lags to include in HAC calculation , applies only to `hac`
- others not yet, e.g. groups for cluster robust
covariance matrix
~~~~~~~~~~~~~~~~~
The same options as for weight matrix also apply to the calculation of the
estimate of the covariance matrix of the parameter estimates.
The additional option is
- `has_optimal_weights`: If true, then the calculation of the covariance
matrix assumes that we have optimal GMM with :math:`W = S^{-1}`.
Default is True.
TODO: do we want to have a different default after `onestep`?
'''
class GMM(Model):
'''
Class for estimation by Generalized Method of Moments
needs to be subclassed, where the subclass defined the moment conditions
`momcond`
Parameters
----------
endog : array
endogenous variable, see notes
exog : array
array of exogenous variables, see notes
instrument : array
array of instruments, see notes
nmoms : None or int
number of moment conditions, if None then it is set equal to the
number of columns of instruments. Mainly needed to determin the shape
or size of start parameters and starting weighting matrix.
kwds : anything
this is mainly if additional variables need to be stored for the
calculations of the moment conditions
Returns
-------
*Attributes*
results : instance of GMMResults
currently just a storage class for params and cov_params without it's
own methods
bse : property
return bse
Notes
-----
The GMM class only uses the moment conditions and does not use any data
directly. endog, exog, instrument and kwds in the creation of the class
instance are only used to store them for access in the moment conditions.
Which of this are required and how they are used depends on the moment
conditions of the subclass.
Warning:
Options for various methods have not been fully implemented and
are still missing in several methods.
TODO:
currently onestep (maxiter=0) still produces an updated estimate of bse
and cov_params.
'''
results_class = 'GMMResults'
def __init__(self, endog, exog, instrument, k_moms=None, k_params=None,
missing='none', **kwds):
'''
maybe drop and use mixin instead
TODO: GMM doesn't really care about the data, just the moment conditions
'''
instrument = self._check_inputs(instrument, endog) # attaches if needed
super(GMM, self).__init__(endog, exog, missing=missing,
instrument=instrument)
# self.endog = endog
# self.exog = exog
# self.instrument = instrument
self.nobs = endog.shape[0]
if k_moms is not None:
self.nmoms = k_moms
elif instrument is not None:
self.nmoms = instrument.shape[1]
else:
self.nmoms = np.nan
if k_params is not None:
self.k_params = k_params
elif instrument is not None:
self.k_params = exog.shape[1]
else:
self.k_params = np.nan
self.__dict__.update(kwds)
self.epsilon_iter = 1e-6
def _check_inputs(self, instrument, endog):
if instrument is not None:
offset = np.asarray(instrument)
if offset.shape[0] != endog.shape[0]:
raise ValueError("instrument is not the same length as endog")
return instrument
def _fix_param_names(self, params, param_names=None):
# TODO: this is a temporary fix, need
xnames = self.data.xnames
if not param_names is None:
if len(params) == len(param_names):
self.data.xnames = param_names
else:
raise ValueError('param_names has the wrong length')
else:
if len(params) < len(xnames):
# cut in front for poisson multiplicative
self.data.xnames = xnames[-len(params):]
elif len(params) > len(xnames):
# cut at the end
self.data.xnames = xnames[:len(params)]
def fit(self, start_params=None, maxiter=10, inv_weights=None,
weights_method='cov', wargs=(),
has_optimal_weights=True,
optim_method='bfgs', optim_args=None):
'''
Estimate parameters using GMM and return GMMResults
TODO: weight and covariance arguments still need to be made consistent
with similar options in other models,
see RegressionResult.get_robustcov_results
Parameters
----------
start_params : array (optional)
starting value for parameters ub minimization. If None then
fitstart method is called for the starting values.
maxiter : int or 'cue'
Number of iterations in iterated GMM. The onestep estimate can be
obtained with maxiter=0 or 1. If maxiter is large, then the
iteration will stop either at maxiter or on convergence of the
parameters (TODO: no options for convergence criteria yet.)
If `maxiter == 'cue'`, the the continuously updated GMM is
calculated which updates the weight matrix during the minimization
of the GMM objective function. The CUE estimation uses the onestep
parameters as starting values.
inv_weights : None or ndarray
inverse of the starting weighting matrix. If inv_weights are not
given then the method `start_weights` is used which depends on
the subclass, for IV subclasses `inv_weights = z'z` where `z` are
the instruments, otherwise an identity matrix is used.
weights_method : string, defines method for robust
Options here are similar to :mod:`statsmodels.stats.robust_covariance`
default is heteroscedasticity consistent, HC0
currently available methods are
- `cov` : HC0, optionally with degrees of freedom correction
- `hac` :
- `iid` : untested, only for Z*u case, IV cases with u as error indep of Z
- `ac` : not available yet
- `cluster` : not connected yet
- others from robust_covariance
wargs` : tuple or dict,
required and optional arguments for weights_method
- `centered` : bool,
indicates whether moments are centered for the calculation of the weights
and covariance matrix, applies to all weight_methods
- `ddof` : int
degrees of freedom correction, applies currently only to `cov`
- `maxlag` : int
number of lags to include in HAC calculation , applies only to `hac`
- others not yet, e.g. groups for cluster robust
has_optimal_weights: If true, then the calculation of the covariance
matrix assumes that we have optimal GMM with :math:`W = S^{-1}`.
Default is True.
TODO: do we want to have a different default after `onestep`?
optim_method : string, default is 'bfgs'
numerical optimization method. Currently not all optimizers that
are available in LikelihoodModels are connected.
optim_args : dict
keyword arguments for the numerical optimizer.
Returns
-------
results : instance of GMMResults
this is also attached as attribute results
Notes
-----
Warning: One-step estimation, `maxiter` either 0 or 1, still has
problems (at least compared to Stata's gmm).
By default it uses a heteroscedasticity robust covariance matrix, but
uses the assumption that the weight matrix is optimal.
See options for cov_params in the results instance.
The same options as for weight matrix also apply to the calculation of
the estimate of the covariance matrix of the parameter estimates.
'''
# TODO: add check for correct wargs keys
# currently a misspelled key is not detected,
# because I'm still adding options
# TODO: check repeated calls to fit with different options
# arguments are dictionaries, i.e. mutable
# unit test if anything is stale or spilled over.
#bug: where does start come from ???
start = start_params # alias for renaming
if start is None:
start = self.fitstart() #TODO: temporary hack
if inv_weights is None:
inv_weights
if optim_args is None:
optim_args = {}
if not 'disp' in optim_args:
optim_args['disp'] = 1
if maxiter == 0 or maxiter == 'cue':
if inv_weights is not None:
weights = np.linalg.pinv(inv_weights)
else:
# let start_weights handle the inv=False for maxiter=0
weights = self.start_weights(inv=False)
params = self.fitgmm(start, weights=weights,
optim_method=optim_method, optim_args=optim_args)
weights_ = weights # temporary alias used in jval
else:
params, weights = self.fititer(start,
maxiter=maxiter,
start_invweights=inv_weights,
weights_method=weights_method,
wargs=wargs,
optim_method=optim_method,
optim_args=optim_args)
# TODO weights returned by fititer is inv_weights - not true anymore
# weights_ currently not necessary and used anymore
weights_ = np.linalg.pinv(weights)
if maxiter == 'cue':
#we have params from maxiter= 0 as starting value
# TODO: need to give weights options to gmmobjective_cu
params = self.fitgmm_cu(params,
optim_method=optim_method,
optim_args=optim_args)
# weights is stored as attribute
weights = self._weights_cu
#TODO: use Bunch instead ?
options_other = {'weights_method':weights_method,
'has_optimal_weights':has_optimal_weights,
'optim_method':optim_method}
# check that we have the right number of xnames
self._fix_param_names(params, param_names=None)
results = results_class_dict[self.results_class](
model = self,
params = params,
weights = weights,
wargs = wargs,
options_other = options_other,
optim_args = optim_args)
self.results = results # FIXME: remove, still keeping it temporarily
return results
def fitgmm(self, start, weights=None, optim_method='bfgs', optim_args=None):
'''estimate parameters using GMM
Parameters
----------
start : array_like
starting values for minimization
weights : array
weighting matrix for moment conditions. If weights is None, then
the identity matrix is used
Returns
-------
paramest : array
estimated parameters
Notes
-----
todo: add fixed parameter option, not here ???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
# TODO: should start_weights only be in `fit`
if weights is None:
weights = self.start_weights(inv=False)
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
# TODO: add score
optim_args['fprime'] = self.score #lambda params: self.score(params, weights)
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
optim_args['fprime'] = self.score
elif optim_method == 'cg':
optimizer = optimize.fmin_cg
optim_args['fprime'] = self.score
elif optim_method == 'fmin_l_bfgs_b':
optimizer = optimize.fmin_l_bfgs_b
optim_args['fprime'] = self.score
elif optim_method == 'powell':
optimizer = optimize.fmin_powell
elif optim_method == 'slsqp':
optimizer = optimize.fmin_slsqp
else:
raise ValueError('optimizer method not available')
if DEBUG:
print(np.linalg.det(weights))
#TODO: add other optimization options and results
return optimizer(self.gmmobjective, start, args=(weights,),
**optim_args)
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
'''estimate parameters using continuously updating GMM
Parameters
----------
start : array_like
starting values for minimization
Returns
-------
paramest : array
estimated parameters
Notes
-----
todo: add fixed parameter option, not here ???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
optim_args['fprime'] = self.score_cu
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
else:
raise ValueError('optimizer method not available')
#TODO: add other optimization options and results
return optimizer(self.gmmobjective_cu, start, args=(), **optim_args)
def start_weights(self, inv=True):
return np.eye(self.nmoms)
def gmmobjective(self, params, weights):
'''
objective function for GMM minimization
Parameters
----------
params : array
parameter values at which objective is evaluated
weights : array
weighting matrix
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond_mean(params)
return np.dot(np.dot(moms, weights), moms)
#moms = self.momcond(params)
#return np.dot(np.dot(moms.mean(0),weights), moms.mean(0))
def gmmobjective_cu(self, params, weights_method='cov',
wargs=()):
'''
objective function for continuously updating GMM minimization
Parameters
----------
params : array
parameter values at which objective is evaluated
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond(params)
inv_weights = self.calc_weightmatrix(moms, weights_method=weights_method,
wargs=wargs)
weights = np.linalg.pinv(inv_weights)
self._weights_cu = weights # store if we need it later
return np.dot(np.dot(moms.mean(0), weights), moms.mean(0))
    def fititer(self, start, maxiter=2, start_invweights=None,
                weights_method='cov', wargs=(), optim_method='bfgs',
                optim_args=None):
        '''iterative estimation with updating of optimal weighting matrix
        stopping criteria are maxiter or change in parameter estimate less
        than self.epsilon_iter, with default 1e-6.
        Parameters
        ----------
        start : array
            starting value for parameters
        maxiter : int
            maximum number of iterations
        start_invweights : array (nmoms, nmoms)
            initial inverse weighting matrix; if None, then
            self.start_weights(inv=True) is used
        weights_method : {'cov', ...}
            method to use to estimate the optimal weighting matrix,
            see calc_weightmatrix for details
        wargs : tuple or dict
            extra options passed through to calc_weightmatrix
        optim_method : string
            optimization method passed through to fitgmm
        optim_args : dict or None
            keyword arguments passed through to fitgmm
        Returns
        -------
        params : array
            estimated parameters
        weights : array
            weighting matrix (pseudo-inverse of the moment covariance of the
            previous round) used in the last completed estimation step
        Notes
        -----
        '''
        self.history = []
        momcond = self.momcond
        if start_invweights is None:
            w = self.start_weights(inv=True)
        else:
            w = start_invweights
        #call fitgmm function
        #args = (self.endog, self.exog, self.instrument)
        #args is not used in the method version
        winv_new = w
        for it in range(maxiter):
            # winv is the covariance-of-moments estimate from the previous
            # round; its pseudo-inverse is the weighting matrix used now
            winv = winv_new
            w = np.linalg.pinv(winv)
            #this is still calling function not method
##            resgmm = fitgmm(momcond, (), start, weights=winv, fixed=None,
##                            weightsoptimal=False)
            resgmm = self.fitgmm(start, weights=w, optim_method=optim_method,
                                 optim_args=optim_args)
            moms = momcond(resgmm)
            # the following is S = cov_moments
            winv_new = self.calc_weightmatrix(moms,
                                              weights_method=weights_method,
                                              wargs=wargs, params=resgmm)
            # early stopping once the estimate stops moving (only checked
            # after the first few rounds); maxabs is a module-level helper
            if it > 2 and maxabs(resgmm - start) < self.epsilon_iter:
                #check rule for early stopping
                # TODO: set has_optimal_weights = True
                break
            # current estimate becomes the starting value of the next round
            start = resgmm
        return resgmm, w
    def calc_weightmatrix(self, moms, weights_method='cov', wargs=(),
                          params=None):
        '''
        calculate omega or the weighting matrix
        Parameters
        ----------
        moms : array, (nobs, nmoms)
            moment conditions for all observations evaluated at a parameter
            value
        weights_method : string 'cov'
            If weights_method is 'cov' then the matrix is calculated as simple
            covariance of the moment conditions.
            see fit method for available options for the weight and covariance
            matrix
        wargs : tuple or dict
            parameters that are required by some kernel methods to
            estimate the long-run covariance. Not used yet.
        params : ndarray, optional
            current parameter estimates; required only by the 'iid' method
            to compute the residuals
        Returns
        -------
        w : array (nmoms, nmoms)
            estimate for the weighting matrix or covariance of the moment
            condition
        Notes
        -----
        currently a constant cutoff window is used
        TODO: implement long-run cov estimators, kernel-based
        Newey-West
        Andrews
        Andrews-Moy????
        References
        ----------
        Greene
        Hansen, Bruce
        '''
        nobs, k_moms = moms.shape
        # TODO: wargs are tuple or dict ?
        if DEBUG:
            print(' momcov wargs', wargs)
        # centering is on by default; it is only switched off by an explicit
        # wargs['centered'] = False
        centered = not ('centered' in wargs and not wargs['centered'])
        if not centered:
            # caller doesn't want centered moment conditions
            moms_ = moms
        else:
            # NOTE(review): moms.mean() is the scalar grand mean, not the
            # column means moms.mean(0) -- confirm whether column-wise
            # centering was intended here
            moms_ = moms - moms.mean()
        # TODO: store this outside to avoid doing this inside optimization loop
        # TODO: subclasses need to be able to add weights_methods, and remove
        #    IVGMM can have homoscedastic (OLS),
        #    some options won't make sense in some cases
        #    possible add all here and allow subclasses to define a list
        # TODO: should other weights_methods also have `ddof`
        if weights_method == 'cov':
            w = np.dot(moms_.T, moms_)
            if 'ddof' in wargs:
                # caller requests degrees of freedom correction
                if wargs['ddof'] == 'k_params':
                    w /= (nobs - self.k_params)
                else:
                    if DEBUG:
                        print(' momcov ddof', wargs['ddof'])
                    w /= (nobs - wargs['ddof'])
            else:
                # default: divide by nobs
                w /= nobs
        elif weights_method == 'flatkernel':
            #uniform cut-off window
            # This was a trial version, can use HAC with flatkernel
            if not 'maxlag' in wargs:
                raise ValueError('flatkernel requires maxlag')
            maxlag = wargs['maxlag']
            h = np.ones(maxlag + 1)
            w = np.dot(moms_.T, moms_)/nobs
            # add the autocovariance terms up to the cutoff with flat weights
            for i in range(1,maxlag+1):
                w += (h[i] * np.dot(moms_[i:].T, moms_[:-i]) / (nobs-i))
        elif weights_method == 'hac':
            maxlag = wargs['maxlag']
            if 'kernel' in wargs:
                weights_func = wargs['kernel']
            else:
                # default HAC kernel is Bartlett; the choice is recorded in
                # wargs so later calls reuse the same kernel
                weights_func = smcov.weights_bartlett
                wargs['kernel'] = weights_func
            w = smcov.S_hac_simple(moms_, nlags=maxlag,
                                   weights_func=weights_func)
            w /= nobs #(nobs - self.k_params)
        elif weights_method == 'iid':
            # only when we have instruments and residual mom = Z * u
            # TODO: problem we don't have params in argument
            #    I cannot keep everything in here w/o params as argument
            u = self.get_error(params)
            if centered:
                # Note: I'm not centering instruments,
                #    shouldn't we always center u? Ok, with centered as default
                u -= u.mean(0) #demean inplace, we don't need original u
            instrument = self.instrument
            w = np.dot(instrument.T, instrument).dot(np.dot(u.T, u)) / nobs
            if 'ddof' in wargs:
                # caller requests degrees of freedom correction
                if wargs['ddof'] == 'k_params':
                    w /= (nobs - self.k_params)
                else:
                    # assume ddof is a number
                    if DEBUG:
                        print(' momcov ddof', wargs['ddof'])
                    w /= (nobs - wargs['ddof'])
            else:
                # default: divide by nobs
                w /= nobs
        else:
            raise ValueError('weight method not available')
        return w
def momcond_mean(self, params):
'''
mean of moment conditions,
'''
momcond = self.momcond(params)
self.nobs_moms, self.k_moms = momcond.shape
return momcond.mean(0)
def gradient_momcond(self, params, epsilon=1e-4, centered=True):
'''gradient of moment conditions
Parameters
----------
params : ndarray
parameter at which the moment conditions are evaluated
epsilon : float
stepsize for finite difference calculation
centered : bool
This refers to the finite difference calculation. If `centered`
is true, then the centered finite difference calculation is
used. Otherwise the one-sided forward differences are used.
TODO: looks like not used yet
missing argument `weights`
'''
momcond = self.momcond_mean
# TODO: approx_fprime has centered keyword
if centered:
gradmoms = (approx_fprime(params, momcond, epsilon=epsilon) +
approx_fprime(params, momcond, epsilon=-epsilon))/2
else:
gradmoms = approx_fprime(params, momcond, epsilon=epsilon)
return gradmoms
def score(self, params, weights, epsilon=None, centered=True):
deriv = approx_fprime(params, self.gmmobjective, args=(weights,),
centered=centered, epsilon=epsilon)
return deriv
def score_cu(self, params, epsilon=None, centered=True):
deriv = approx_fprime(params, self.gmmobjective_cu, args=(),
centered=centered, epsilon=epsilon)
return deriv
# TODO: wrong superclass, I want tvalues, ... right now
class GMMResults(LikelihoodModelResults):
    '''just a storage class right now'''
    def __init__(self, *args, **kwds):
        # store all keyword arguments (model, params, weights, wargs,
        # options_other, optim_args) directly as attributes
        self.__dict__.update(kwds)
        self.nobs = self.model.nobs
    @cache_readonly
    def q(self):
        '''value of the GMM objective function at the estimate'''
        return self.model.gmmobjective(self.params, self.weights)
    @cache_readonly
    def jval(self):
        '''Hansen J statistic: nobs times the objective function'''
        # nobs_moms attached by momcond_mean
        return self.q * self.model.nobs_moms
    def cov_params(self, **kwds):
        '''covariance matrix of the parameter estimates
        Keyword options default to the options used in `fit` (wargs,
        weights_method, has_optimal_weights) and are passed through to
        calc_cov_params.  The result of the most recent call is stored in
        `_cov_params`.
        '''
        #TODO add options ???)
        # this should use by default whatever options have been specified in
        # fit
        # TODO: don't do this when we want to change options
        #    if hasattr(self, '_cov_params'):
        #        #replace with decorator later
        #        return self._cov_params
        # set defaults based on fit arguments
        if not 'wargs' in kwds:
            # Note: we don't check the keys in wargs, use either all or nothing
            kwds['wargs'] = self.wargs
        if not 'weights_method' in kwds:
            kwds['weights_method'] = self.options_other['weights_method']
        if not 'has_optimal_weights' in kwds:
            kwds['has_optimal_weights'] = self.options_other['has_optimal_weights']
        gradmoms = self.model.gradient_momcond(self.params)
        moms = self.model.momcond(self.params)
        covparams = self.calc_cov_params(moms, gradmoms, **kwds)
        self._cov_params = covparams
        return self._cov_params
    def calc_cov_params(self, moms, gradmoms, weights=None, use_weights=False,
                                              has_optimal_weights=True,
                                              weights_method='cov', wargs=()):
        '''calculate covariance of parameter estimates
        not all options tried out yet
        If weights matrix is given, then the formula use to calculate cov_params
        depends on whether has_optimal_weights is true.
        If no weights are given, then the weight matrix is calculated with
        the given method, and has_optimal_weights is assumed to be true.
        (API Note: The latter assumption could be changed if we allow for
        has_optimal_weights=None.)
        '''
        nobs = moms.shape[0]
        if weights is None:
            #omegahat = self.model.calc_weightmatrix(moms, method=method, wargs=wargs)
            #has_optimal_weights = True
            #add other options, Barzen, ...  longrun var estimators
            # TODO: this might still be inv_weights after fititer
            weights = self.weights
        else:
            pass
            #omegahat = weights   #2 different names used,
            #TODO: this is wrong, I need an estimate for omega
        # omegahat is the estimate of the covariance of the moment conditions
        if use_weights:
            omegahat = weights
        else:
            omegahat = self.model.calc_weightmatrix(
                                                moms,
                                                weights_method=weights_method,
                                                wargs=wargs,
                                                params=self.params)
        if has_optimal_weights: #has_optimal_weights:
            # TOD0 make has_optimal_weights depend on convergence or iter >2
            # efficient GMM: (G' Omega^{-1} G)^{-1}
            cov = np.linalg.inv(np.dot(gradmoms.T,
                                       np.dot(np.linalg.inv(omegahat), gradmoms)))
        else:
            # sandwich form for a non-optimal weight matrix
            gw = np.dot(gradmoms.T, weights)
            gwginv = np.linalg.inv(np.dot(gw, gradmoms))
            cov = np.dot(np.dot(gwginv, np.dot(np.dot(gw, omegahat), gw.T)), gwginv)
            #cov /= nobs
        return cov/nobs
    @property
    def bse(self):
        '''standard error of the parameter estimates
        '''
        return self.get_bse()
    def get_bse(self, **kwds):
        '''standard error of the parameter estimates with options
        Parameters
        ----------
        kwds : optional keywords
            options for calculating cov_params
        Returns
        -------
        bse : ndarray
            estimated standard error of parameter estimates
        '''
        return np.sqrt(np.diag(self.cov_params(**kwds)))
    def jtest(self):
        '''overidentification test
        I guess this is missing a division by nobs,
        what's the normalization in jval ?
        '''
        jstat = self.jval
        nparams = self.params.size #self.nparams
        df = self.model.nmoms - nparams
        # J statistic, chi2 p-value and degrees of freedom
        return jstat, stats.chi2.sf(jstat, df), df
    def compare_j(self, other):
        '''overidentification test for comparing two nested gmm estimates
        This assumes that some moment restrictions have been dropped in one
        of the GMM estimates relative to the other.
        Not tested yet
        We are comparing two separately estimated models, that use different
        weighting matrices. It is not guaranteed that the resulting
        difference is positive.
        TODO: Check in which cases Stata programs use the same weigths
        '''
        jstat1 = self.jval
        k_moms1 = self.model.nmoms
        jstat2 = other.jval
        k_moms2 = other.model.nmoms
        jdiff = jstat1 - jstat2
        df = k_moms1 - k_moms2
        if df < 0:
            # possible nested in other way, TODO allow this or not
            # flip sign instead of absolute
            df = - df
            jdiff = - jdiff
        return jdiff, stats.chi2.sf(jdiff, df), df
    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results
        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in the range of the number of
            regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals
        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.
        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        #TODO: add a summary text for options that have been used
        jvalue, jpvalue, jdf = self.jtest()
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['GMM']),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Observations:', None),
                    #('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
                    #('Df Model:', None), #[self.df_model])
                    ]
        top_right = [#('R-squared:', ["%#8.3f" % self.rsquared]),
                     #('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
                     ('Hansen J:', ["%#8.4g" % jvalue] ),
                     ('Prob (Hansen J):', ["%#6.3g" % jpvalue]),
                     #('F-statistic:', ["%#8.4g" % self.fvalue] ),
                     #('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                     #('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
                     #('AIC:', ["%#8.4g" % self.aic]),
                     #('BIC:', ["%#8.4g" % self.bic])
                     ]
        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Results"
        #create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=False)
        return smry
class IVGMM(GMM):
    '''
    Basic class for instrumental variables estimation using GMM
    A linear function for the conditional mean is defined as default but the
    methods should be overwritten by subclasses, currently `LinearIVGMM` and
    `NonlinearIVGMM` are implemented as subclasses.
    See Also
    --------
    LinearIVGMM
    NonlinearIVGMM
    '''
    results_class = 'IVGMMResults'
    def fitstart(self):
        '''default starting values: a zero for every column of exog'''
        return np.zeros(self.exog.shape[1])
    def start_weights(self, inv=True):
        '''(pseudo-inverse of) Z'Z / nobs as the initial weighting matrix'''
        nobs = self.instrument.shape[0]
        zz = np.dot(self.instrument.T, self.instrument) / nobs
        if inv:
            return zz
        return np.linalg.pinv(zz)
    def get_error(self, params):
        '''residual of the conditional mean at `params`'''
        return self.endog - self.predict(params)
    def predict(self, params, exog=None):
        '''linear prediction exog dot params'''
        if exog is None:
            exog = self.exog
        return np.dot(exog, params)
    def momcond(self, params):
        '''moment conditions: instruments times residual, one row per obs'''
        return self.instrument * self.get_error(params)[:, None]
class LinearIVGMM(IVGMM):
    """class for linear instrumental variables models estimated with GMM
    Uses closed form expression instead of nonlinear optimizers for each step
    of the iterative GMM.
    The model is assumed to have the following moment condition
        E( z * (y - x beta)) = 0
    Where `y` is the dependent endogenous variable, `x` are the explanatory
    variables and `z` are the instruments. Variables in `x` that are exogenous
    need also be included in `z`.
    Notation Warning: our name `exog` stands for the explanatory variables,
    and includes both exogenous and explanatory variables that are endogenous,
    i.e. included endogenous variables
    Parameters
    ----------
    endog : array_like
        dependent endogenous variable
    exog : array_like
        explanatory, right hand side variables, including explanatory variables
        that are endogenous
    instruments : array_like
        Instrumental variables, variables that are exogenous to the error
        in the linear model containing both included and excluded exogenous
        variables
    """
    def fitgmm(self, start, weights=None, optim_method=None, **kwds):
        '''estimate parameters using GMM for linear model
        Uses closed form expression instead of nonlinear optimizers
        Parameters
        ----------
        start : not used
            starting values for minimization, not used, only for consistency
            of method signature
        weights : array
            weighting matrix for moment conditions. If weights is None, then
            the identity matrix is used
        optim_method : not used,
            optimization method, not used, only for consistency of method
            signature
        **kwds : keyword arguments
            not used, will be silently ignored (for compatibility with generic)
        Returns
        -------
        paramest : array
            estimated parameters
        '''
        # TODO: should start_weights only be in `fit`
        if weights is None:
            weights = self.start_weights(inv=False)
        y, x, z = self.endog, self.exog, self.instrument
        zTx = np.dot(z.T, x)
        zTy = np.dot(z.T, y)
        # GMM normal equations (x'z W z'x) beta = x'z W z'y, solved with pinv
        # to be robust to a singular left-hand side
        part0 = zTx.T.dot(weights)
        part1 = part0.dot(zTx)
        part2 = part0.dot(zTy)
        params = np.linalg.pinv(part1).dot(part2)
        return params
    def predict(self, params, exog=None):
        '''linear prediction for the given (or the model's own) exog'''
        if exog is None:
            exog = self.exog
        return np.dot(exog, params)
    def gradient_momcond(self, params, **kwds):
        '''analytical gradient of the moment conditions: -z'x / nobs'''
        # **kwds only for compatibility with the numerical-derivative
        # signature in the superclass; not used
        x, z = self.exog, self.instrument
        gradmoms = -np.dot(z.T, x) / self.nobs
        return gradmoms
    def score(self, params, weights, **kwds):
        '''analytical score (gradient of the GMM objective function)'''
        # **kwds for compatibility, not used
        # Note: could use the general formula with gradient_momcond instead
        x, z = self.exog, self.instrument
        nobs = z.shape[0]
        # BUG FIX: was self.get_errors(params), which does not exist; the
        # residual method defined on IVGMM is get_error
        u = self.get_error(params)
        score = -2 * np.dot(x.T, z).dot(weights.dot(np.dot(z.T, u)))
        score /= nobs * nobs
        return score
class NonlinearIVGMM(IVGMM):
    """
    Class for non-linear instrumental variables estimation using GMM
    The model is assumed to have the following moment condition
        E[ z * (y - f(X, beta)) ] = 0
    Where `y` is the dependent endogenous variable, `x` are the explanatory
    variables and `z` are the instruments. Variables in `x` that are exogenous
    need also be included in z. `f` is a nonlinear function.
    Notation Warning: our name `exog` stands for the explanatory variables,
    and includes both exogenous and explanatory variables that are endogenous,
    i.e. included endogenous variables
    Parameters
    ----------
    endog : array_like
        dependent endogenous variable
    exog : array_like
        explanatory, right hand side variables, including explanatory variables
        that are endogenous.
    instruments : array_like
        Instrumental variables, variables that are exogenous to the error
        in the linear model containing both included and excluded exogenous
        variables
    func : callable
        function for the mean or conditional expectation of the endogenous
        variable. The function will be called with parameters and the array of
        explanatory, right hand side variables, `func(params, exog)`
    Notes
    -----
    This class uses numerical differences to obtain the derivative of the
    objective function. If the jacobian of the conditional mean function, `func`
    is available, then it can be used by subclassing this class and defining
    a method `jac_func`.
    TODO: check required signature of jac_error and jac_func
    """
    # This should be reversed:
    # NonlinearIVGMM is IVGMM and need LinearIVGMM as special case (fit, predict)
    def __init__(self, endog, exog, instrument, func, **kwds):
        self.func = func
        super(NonlinearIVGMM, self).__init__(endog, exog, instrument, **kwds)
    def fitstart(self):
        '''default starting values: zeros
        might not make sense for more general mean functions'''
        return np.zeros(self.exog.shape[1])
    def predict(self, params, exog=None):
        '''conditional mean func(params, exog)'''
        if exog is None:
            exog = self.exog
        return self.func(params, exog)
    #---------- the following a semi-general versions,
    # TODO: move to higher class after testing
    def jac_func(self, params, weights, args=None, centered=True, epsilon=None):
        '''numerical jacobian of the mean function with respect to params'''
        # TODO: Why are there weights in the signature - copy-paste error?
        deriv = approx_fprime(params, self.func, args=(self.exog,),
                              centered=centered, epsilon=epsilon)
        return deriv
    def jac_error(self, params, weights, args=None, centered=True,
                  epsilon=None):
        '''jacobian of the residuals; the negative of the mean jacobian'''
        # BUG FIX: previously hard-coded args=None, centered=True,
        # epsilon=None and silently ignored the caller's values; the
        # options are now forwarded (the defaults are unchanged, so the
        # in-file caller `score` behaves exactly as before)
        jac_func = self.jac_func(params, weights, args=args,
                                 centered=centered, epsilon=epsilon)
        return -jac_func
    def score(self, params, weights, **kwds):
        '''analytical score based on the numerical residual jacobian'''
        # **kwds for compatibility, not used
        # Note: could use the general formula with gradient_momcond instead
        z = self.instrument
        nobs = z.shape[0]
        jac_u = self.jac_error(params, weights, args=None, epsilon=None,
                               centered=True)
        x = -jac_u  # alias, plays the same role as X in linear model
        u = self.get_error(params)
        score = -2 * np.dot(np.dot(x.T, z), weights).dot(np.dot(z.T, u))
        score /= nobs * nobs
        return score
class IVGMMResults(GMMResults):
    # this assumes an additive error model `(y - f(x, params))`
    @cache_readonly
    def fittedvalues(self):
        '''predicted values of the conditional mean at the estimate'''
        return self.model.predict(self.params)
    @cache_readonly
    def resid(self):
        '''residuals: endog minus fitted values'''
        return self.model.endog - self.fittedvalues
    @cache_readonly
    def ssr(self):
        '''sum of squared residuals'''
        return np.sum(self.resid ** 2, axis=0)
def spec_hausman(params_e, params_i, cov_params_e, cov_params_i, dof=None):
    '''Hausman's specification test
    Parameters
    ----------
    params_e : array
        parameter estimates that are efficient and consistent under the
        null hypothesis but inconsistent under the alternative
    params_i : array
        parameter estimates that are consistent under both the null and
        the alternative hypothesis
    cov_params_e : array, 2d
        covariance matrix of parameter estimates for params_e
    cov_params_i : array, 2d
        covariance matrix of parameter estimates for params_i
    dof : int or None
        degrees of freedom of the chi-square distribution; if None (or 0),
        the matrix rank of the covariance difference is used
    Returns
    -------
    h : float
        Hausman test statistic
    pval : float
        p-value of the chi-square distribution with `dof` degrees of freedom
    dof : int
        degrees of freedom actually used
    evals : ndarray
        eigenvalues of the covariance difference, useful for diagnosing an
        indefinite (singular) difference matrix
    Notes
    -----
    example: instrumental variables OLS estimator is `e`, IV estimator is `i`
    Todos,Issues
    - check dof calculations and verify for linear case
    - check one-sided hypothesis
    References
    ----------
    Greene section 5.5 p.82/83
    '''
    diff = params_i - params_e
    cov_diff = cov_params_i - cov_params_e
    #TODO: the following is very inefficient, solves problem (svd) twice
    #use linalg.lstsq or svd directly
    #cov_diff will very often be in-definite (singular)
    if not dof:
        dof = np_matrix_rank(cov_diff)
    stat = diff.dot(np.linalg.pinv(cov_diff)).dot(diff)
    pval = stats.chi2.sf(stat, dof)
    evals = np.linalg.eigvalsh(cov_diff)
    return stat, pval, dof, evals
###########
class DistQuantilesGMM(GMM):
    '''
    Estimate distribution parameters by GMM based on matching quantiles
    Currently mainly to try out different requirements for GMM when we cannot
    calculate the optimal weighting matrix.
    '''
    def __init__(self, endog, exog, instrument, **kwds):
        #TODO: something wrong with super
        super(DistQuantilesGMM, self).__init__(endog, exog, instrument)
        #self.func = func
        self.epsilon_iter = 1e-5
        # the scipy.stats-style distribution whose parameters are estimated
        self.distfn = kwds['distfn']
        #done by super doesn't work yet
        #TypeError: super does not take keyword arguments
        self.endog = endog
        #make this optional for fit
        # probability levels of the quantiles used as moment conditions
        if not 'pquant' in kwds:
            self.pquant = pquant = np.array([0.01, 0.05,0.1,0.4,0.6,0.9,0.95,0.99])
        else:
            self.pquant = pquant = kwds['pquant']
        #TODO: vectorize this: use edf
        # empirical quantiles of endog at the chosen probability levels
        self.xquant = np.array([stats.scoreatpercentile(endog, p) for p
                                in pquant*100])
        self.nmoms = len(self.pquant)
        #TODO: copied from GMM, make super work
        self.endog = endog
        self.exog = exog
        self.instrument = instrument
        self.results = GMMResults(model=self)
        #self.__dict__.update(kwds)
        # NOTE: overrides the 1e-5 set above; final value is 1e-6
        self.epsilon_iter = 1e-6
    def fitstart(self):
        #todo: replace with or add call to distfn._fitstart
        #      added but not used during testing, avoid Travis
        # use the distribution's own starting values if it provides them,
        # otherwise ones for the shape parameters plus loc=0, scale=1
        distfn = self.distfn
        if hasattr(distfn, '_fitstart'):
            start = distfn._fitstart(self.endog)
        else:
            start = [1]*distfn.numargs + [0.,1.]
        return np.asarray(start)
    def momcond(self, params): #drop distfn as argument
        #, mom2, quantile=None, shape=None
        '''moment conditions for estimating distribution parameters by matching
        quantiles, defines as many moment conditions as quantiles.
        Returns
        -------
        difference : array
            difference between theoretical and empirical quantiles
        Notes
        -----
        This can be used for method of moments or for generalized method of
        moments.
        '''
        #this check looks redundant/unused now
        if len(params) == 2:
            loc, scale = params
        elif len(params) == 3:
            shape, loc, scale = params
        else:
            #raise NotImplementedError
            pass #see whether this might work, seems to work for beta with 2 shape args
        #mom2diff = np.array(distfn.stats(*params)) - mom2
        #if not quantile is None:
        pq, xq = self.pquant, self.xquant
        #ppfdiff = distfn.ppf(pq, alpha)
        # difference between model cdf at the empirical quantiles and the
        # target probability levels; zero when quantiles match exactly
        cdfdiff = self.distfn.cdf(xq, *params) - pq
        #return np.concatenate([mom2diff, cdfdiff[:1]])
        return np.atleast_2d(cdfdiff)
    def fitonce(self, start=None, weights=None, has_optimal_weights=False):
        '''fit without estimating an optimal weighting matrix and return results
        This is a convenience function that calls fitgmm and covparams with
        a given weight matrix or the identity weight matrix.
        This is useful if the optimal weight matrix is known (or is analytically
        given) or if an optimal weight matrix cannot be calculated.
        (Developer Notes: this function could go into GMM, but is needed in this
        class, at least at the moment.)
        Parameters
        ----------
        Returns
        -------
        results : GMMResult instance
            result instance with params and _cov_params attached
        See Also
        --------
        fitgmm
        cov_params
        '''
        if weights is None:
            weights = np.eye(self.nmoms)
        params = self.fitgmm(start=start)
        # TODO: rewrite this old hack, should use fitgmm or fit maxiter=0
        self.results.params = params  #required before call to self.cov_params
        self.results.wargs = {}  #required before call to self.cov_params
        self.results.options_other = {'weights_method':'cov'}
        # TODO: which weights_method? There shouldn't be any needed ?
        _cov_params = self.results.cov_params(weights=weights,
                                      has_optimal_weights=has_optimal_weights)
        self.results.weights = weights
        self.results.jval = self.gmmobjective(params, weights)
        self.results.options_other.update({'has_optimal_weights':has_optimal_weights})
        return self.results
# maps the string stored in a model's `results_class` attribute to the
# results class that `GMM.fit` instantiates
results_class_dict = {'GMMResults': GMMResults,
                      'IVGMMResults': IVGMMResults,
                      'DistQuantilesGMM': GMMResults} #TODO: should be a default
| bsd-3-clause |
le9i0nx/ansible | test/units/modules/network/netscaler/test_netscaler_gslb_service.py | 39 | 27775 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerGSLBSiteModule(TestModule):
    @classmethod
    def setUpClass(cls):
        # exception class that stands in for nitro_exception in the tests
        class MockException(Exception):
            pass
        cls.MockException = MockException
        m = MagicMock()
        # fake out every nssrc module path that the module under test (or
        # monkey_patch_nitro_api) imports, so the tests can run without the
        # real nitro SDK installed
        nssrc_modules_mock = {
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice.gslbservice': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding.gslbservice_lbmonitor_binding': m,
            # The following are needed because of monkey_patch_nitro_api()
            'nssrc.com.citrix.netscaler.nitro.resource.base': m,
            'nssrc.com.citrix.netscaler.nitro.resource.base.Json': m,
            'nssrc.com.citrix.netscaler.nitro.resource.base.Json.Json': m,
            'nssrc.com.citrix.netscaler.nitro.util': m,
            'nssrc.com.citrix.netscaler.nitro.util.nitro_util': m,
            'nssrc.com.citrix.netscaler.nitro.util.nitro_util.nitro_util': m,
        }
        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
        cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
    def setUp(self):
        # run the shared TestModule setup first, then activate the nitro
        # SDK patchers for the duration of each individual test
        super(TestNetscalerGSLBSiteModule, self).setUp()
        self.nitro_base_patcher.start()
        self.nitro_specific_patcher.start()
        # Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerGSLBSiteModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
    def test_graceful_nitro_api_import_error(self):
        # Stop nitro api patching to cause ImportError
        set_module_args(dict(
            nitro_user='user',
            nitro_pass='pass',
            nsip='1.1.1.1',
            state='present',
        ))
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
        from ansible.modules.network.netscaler import netscaler_gslb_service
        self.module = netscaler_gslb_service
        # without the SDK the module must fail cleanly with this message
        result = self.failed()
        self.assertEqual(result['msg'], 'Could not load nitro python sdk')
    def test_graceful_nitro_error_on_login(self):
        set_module_args(dict(
            nitro_user='user',
            nitro_pass='pass',
            nsip='1.1.1.1',
            state='present',
        ))
        from ansible.modules.network.netscaler import netscaler_gslb_service
        # stand-in for nitro_exception; the module reads errorcode/message
        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''
        client_mock = Mock()
        # make client.login() raise the fake nitro exception
        client_mock.login = Mock(side_effect=MockException)
        m = Mock(return_value=client_mock)
        with patch('ansible.modules.network.netscaler.netscaler_gslb_service.get_nitro_client', m):
            with patch('ansible.modules.network.netscaler.netscaler_gslb_service.nitro_exception', MockException):
                self.module = netscaler_gslb_service
                result = self.failed()
                self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
    def test_graceful_no_connection_error(self):
        # requests is only imported for python != 2.6 at module scope
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        set_module_args(dict(
            nitro_user='user',
            nitro_pass='pass',
            nsip='1.1.1.1',
            state='present',
        ))
        from ansible.modules.network.netscaler import netscaler_gslb_service
        class MockException(Exception):
            pass
        client_mock = Mock()
        # make client.login() raise a connection error
        attrs = {'login.side_effect': requests.exceptions.ConnectionError}
        client_mock.configure_mock(**attrs)
        m = Mock(return_value=client_mock)
        with patch.multiple(
            'ansible.modules.network.netscaler.netscaler_gslb_service',
            get_nitro_client=m,
            nitro_exception=MockException,
        ):
            self.module = netscaler_gslb_service
            result = self.failed()
            self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
    """A requests SSLError during login must be reported gracefully."""
    # Skip before doing any setup work, consistent with
    # test_graceful_no_connection_error (previously the skip ran only after
    # set_module_args and the module import).
    if sys.version_info[:2] == (2, 6):
        self.skipTest('requests library not available under python2.6')
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service

    class MockException(Exception):
        pass

    client_mock = Mock()
    attrs = {'login.side_effect': requests.exceptions.SSLError}
    client_mock.configure_mock(**attrs)
    m = Mock(return_value=client_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        monkey_patch_nitro_api=Mock(),
        nitro_exception=MockException,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_ensure_feature_is_enabled_called(self):
    """The module must enable the 'GSLB' feature on the nitro client."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    gslb_service_proxy_mock = Mock()
    ensure_feature_is_enabled_mock = Mock()
    client_mock = Mock()
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=Mock(return_value=client_mock),
        gslb_service_exists=Mock(side_effect=[False, True]),
        gslb_service_identical=Mock(side_effect=[True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
    ):
        self.module = netscaler_gslb_service
        self.exited()
        ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')
def test_save_config_called_on_state_present(self):
    """With default save_config, state=present must persist the config."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    gslb_service_proxy_mock = Mock()
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        gslb_service_exists=Mock(side_effect=[False, True]),
        gslb_service_identical=Mock(side_effect=[True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
    ):
        self.module = netscaler_gslb_service
        self.exited()
        self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
    """With default save_config, state=absent must persist the config."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    gslb_service_proxy_mock = Mock()
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        gslb_service_exists=Mock(side_effect=[True, False]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
    ):
        self.module = netscaler_gslb_service
        self.exited()
        self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
    """save_config=False must suppress config persistence for state=present."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
        save_config=False,
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    gslb_service_proxy_mock = Mock()
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        gslb_service_exists=Mock(side_effect=[False, True]),
        gslb_service_identical=Mock(side_effect=[True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
    ):
        self.module = netscaler_gslb_service
        self.exited()
        self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
    """save_config=False must suppress config persistence for state=absent."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
        save_config=False,
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    gslb_service_proxy_mock = Mock()
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        gslb_service_exists=Mock(side_effect=[True, False]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
    ):
        self.module = netscaler_gslb_service
        self.exited()
        self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_gslb_site_execution_flow(self):
    """Creating a service that does not yet exist must call add() on the proxy."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        gslb_service_exists=Mock(side_effect=[False, True]),
        gslb_service_identical=Mock(side_effect=[True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        self.exited()
        gslb_service_proxy_mock.assert_has_calls([call.add()])
def test_modified_gslb_site_execution_flow(self):
    """Updating an existing, non-identical service must call update() on the proxy."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[False, False, True]),
        monitor_bindings_identical=Mock(side_effect=[True, True, True]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        nitro_exception=self.MockException,
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        self.exited()
        gslb_service_proxy_mock.assert_has_calls([call.update()])
def test_absent_gslb_site_execution_flow(self):
    """state=absent on an existing service must call delete() on the proxy."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, False]),
        gslb_service_identical=Mock(side_effect=[False, True]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        self.exited()
        gslb_service_proxy_mock.assert_has_calls([call.delete()])
def test_present_gslb_service_identical_flow(self):
    """state=present on an identical existing service must not touch the proxy."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[True, True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        self.exited()
        gslb_service_proxy_mock.assert_not_called()
def test_absent_gslb_site_noop_flow(self):
    """state=absent on a non-existent service must not touch the proxy."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[False, False]),
        gslb_service_identical=Mock(side_effect=[False, False]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        self.exited()
        gslb_service_proxy_mock.assert_not_called()
def test_present_gslb_site_failed_update(self):
    """An update that leaves the service non-identical must fail the module."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        nitro_exception=self.MockException,
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[False, False, False]),
        monitor_bindings_identical=Mock(side_effect=[True, True, True]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertEqual(result['msg'], 'GSLB service differs from configured')
        self.assertTrue(result['failed'])
def test_present_gslb_site_failed_monitor_bindings_update(self):
    """An update that leaves monitor bindings non-identical must fail the module."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        nitro_exception=self.MockException,
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[False, False, True]),
        monitor_bindings_identical=Mock(side_effect=[False, False, False]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertEqual(result['msg'], 'Monitor bindings differ from configured')
        self.assertTrue(result['failed'])
def test_present_gslb_site_failed_create(self):
    """A create after which the service still does not exist must fail the module."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        nitro_exception=self.MockException,
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[False, False]),
        gslb_service_identical=Mock(side_effect=[False, False]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertEqual(result['msg'], 'GSLB service does not exist')
        self.assertTrue(result['failed'])
def test_present_gslb_site_update_immutable_attribute(self):
    """Attempting to change an immutable attribute must fail the module."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        nitro_exception=self.MockException,
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=['domain']),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[False, False]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
        self.assertTrue(result['failed'])
def test_absent_gslb_site_failed_delete(self):
    """A delete after which the service still exists must fail the module."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service
    client_mock = Mock()
    m = Mock(return_value=client_mock)
    glsb_service_proxy_attrs = {
        'diff_object.return_value': {},
    }
    gslb_service_proxy_mock = Mock()
    gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
    config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        nitro_exception=self.MockException,
        get_nitro_client=m,
        diff_list=Mock(return_value={}),
        get_immutables_intersection=Mock(return_value=[]),
        gslb_service_exists=Mock(side_effect=[True, True]),
        gslb_service_identical=Mock(side_effect=[False, False]),
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        ConfigProxy=config_proxy_mock,
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertEqual(result['msg'], 'GSLB service still exists')
        self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
    """A nitro exception while probing service existence must fail (state=present)."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='present',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service

    class MockException(Exception):
        def __init__(self, *args, **kwargs):
            self.errorcode = 0
            self.message = ''

    m = Mock(side_effect=MockException)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        gslb_service_exists=m,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        nitro_exception=MockException
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertTrue(
            result['msg'].startswith('nitro exception'),
            # Fixed copy-paste error: this test exercises state=present,
            # but the failure message previously said "operation absent".
            msg='Nitro exception not caught on operation present'
        )
def test_graceful_nitro_exception_state_absent(self):
    """A nitro exception while probing service existence must fail (state=absent)."""
    set_module_args(dict(
        nitro_user='user',
        nitro_pass='pass',
        nsip='1.1.1.1',
        state='absent',
    ))
    from ansible.modules.network.netscaler import netscaler_gslb_service

    class MockException(Exception):
        def __init__(self, *args, **kwargs):
            self.errorcode = 0
            self.message = ''

    m = Mock(side_effect=MockException)
    with patch.multiple(
        'ansible.modules.network.netscaler.netscaler_gslb_service',
        gslb_service_exists=m,
        ensure_feature_is_enabled=Mock(),
        monkey_patch_nitro_api=Mock(),
        nitro_exception=MockException
    ):
        self.module = netscaler_gslb_service
        result = self.failed()
        self.assertTrue(
            result['msg'].startswith('nitro exception'),
            msg='Nitro exception not caught on operation absent'
        )
| gpl-3.0 |
chouseknecht/ansible | lib/ansible/modules/storage/purestorage/purefa_arrayname.py | 19 | 2462 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_arrayname
version_added: '2.9'
short_description: Configure Pure Storage FlashArray array name
description:
- Configure name of array for Pure Storage FlashArrays.
- Ideal for Day 0 initial configuration.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description: Set the array name
type: str
default: present
choices: [ present ]
name:
description:
- Name of the array. Must conform to correct naming schema.
type: str
required: true
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Set new array name
purefa_arrayname:
name: new-array-name
state: present
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def update_name(module, array):
    """Rename the FlashArray and finish the module run.

    Reports ``changed=True`` via ``exit_json`` on success; calls
    ``fail_json`` if the underlying REST call raises.
    """
    new_name = module.params['name']
    renamed = False
    try:
        array.set(name=new_name)
        renamed = True
    except Exception:
        module.fail_json(msg='Failed to change array name to {0}'.format(new_name))
    module.exit_json(changed=renamed)
def main():
    """Module entry point: validate the requested array name and apply it.

    Fails if the name violates the naming rules; only calls the rename
    helper when the requested name differs from the current one.
    """
    spec = purefa_argument_spec()
    spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present']),
    ))
    module = AnsibleModule(spec,
                           supports_check_mode=False)
    array = get_system(module)
    # Valid names: alphanumeric with optional internal hyphens, max 56 chars.
    valid_name = re.compile("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,54}[a-zA-Z0-9])?$")
    requested = module.params['name']
    if not valid_name.match(requested):
        module.fail_json(msg='Array name {0} does not conform to array name rules. See documentation.'.format(requested))
    if requested != array.get()['array_name']:
        update_name(module, array)
    module.exit_json(changed=False)
# Run the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
mbode/flink | flink-python/pyflink/table/table.py | 2 | 26520 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
from py4j.java_gateway import get_method
from pyflink.java_gateway import get_gateway
from pyflink.table.table_schema import TableSchema
from pyflink.table.window import GroupWindow
from pyflink.util.utils import to_jarray
# Python 3 has no builtin ``xrange``; alias it to ``range``.
# Compare version_info tuples rather than the version string: the string
# comparison ``sys.version > '3'`` would mis-order double-digit majors
# (e.g. "10..." < "3").
if sys.version_info >= (3, 0):
    xrange = range
__all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable']
class Table(object):
"""
A :class:`Table` is the core component of the Table API.
Similar to how the batch and streaming APIs have DataSet and DataStream,
the Table API is built around :class:`Table`.
Use the methods of :class:`Table` to transform data.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> env.set_parallelism(1)
>>> t_env = StreamTableEnvironment.create(env)
>>> ...
>>> t_env.register_table_source("source", ...)
>>> t = t_env.scan("source")
>>> t.select(...)
>>> ...
>>> t_env.register_table_sink("result", ...)
>>> t.insert_into("result")
>>> t_env.execute("table_job")
Operations such as :func:`~pyflink.table.Table.join`, :func:`~pyflink.table.Table.select`,
:func:`~pyflink.table.Table.where` and :func:`~pyflink.table.Table.group_by`
take arguments in an expression string. Please refer to the documentation for
the expression syntax.
"""
def __init__(self, j_table):
    # Wrap the underlying Java Table object (py4j proxy).
    self._j_table = j_table
def select(self, fields):
    """
    Performs a selection, like a SQL SELECT statement. Field expressions
    may be arbitrarily complex.

    Example:
    ::

        >>> tab.select("key, value + 'hello'")

    :param fields: Expression string.
    :return: The result :class:`Table`.
    """
    j_selected = self._j_table.select(fields)
    return Table(j_selected)
def alias(self, fields):
    """
    Renames the fields of the expression result; use it to disambiguate
    fields before a join.

    Example:
    ::

        >>> tab.alias("a, b")

    :param fields: Field list expression string.
    :return: The result :class:`Table`.
    """
    # "as" is a Python keyword, so the Java method is resolved dynamically.
    j_aliased = get_method(self._j_table, "as")(fields)
    return Table(j_aliased)
def filter(self, predicate):
    """
    Removes rows for which the predicate does not hold, like a SQL WHERE
    clause.

    Example:
    ::

        >>> tab.filter("name = 'Fred'")

    :param predicate: Predicate expression string.
    :return: The result :class:`Table`.
    """
    j_filtered = self._j_table.filter(predicate)
    return Table(j_filtered)
def where(self, predicate):
    """
    Removes rows for which the predicate does not hold, like a SQL WHERE
    clause. Synonymous with :func:`~pyflink.table.Table.filter`.

    Example:
    ::

        >>> tab.where("name = 'Fred'")

    :param predicate: Predicate expression string.
    :return: The result :class:`Table`.
    """
    j_filtered = self._j_table.where(predicate)
    return Table(j_filtered)
def group_by(self, fields):
    """
    Groups the rows on the given keys so a following selection can compute
    per-group aggregates, like a SQL GROUP BY clause.

    Example:
    ::

        >>> tab.group_by("key").select("key, value.avg")

    :param fields: Group keys.
    :return: The grouped :class:`Table`.
    """
    j_grouped = self._j_table.groupBy(fields)
    return GroupedTable(j_grouped)
def distinct(self):
    """
    Returns only distinct rows, removing duplicates.

    Example:
    ::

        >>> tab.select("key, value").distinct()

    :return: The result :class:`Table`.
    """
    j_distinct = self._j_table.distinct()
    return Table(j_distinct)
def join(self, right, join_predicate=None):
    """
    Inner-joins this table with another, like a SQL join. Field names of
    the two inputs must not overlap — rename with
    :func:`~pyflink.table.Table.alias` first if necessary. A where/select
    may follow to refine the join.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.join(right).where("a = b && c > 3").select("a, b, d")
        >>> left.join(right, "a = b")

    :param right: Right table.
    :param join_predicate: Optional, the join predicate expression string.
    :return: The result :class:`Table`.
    """
    if join_predicate is None:
        return Table(self._j_table.join(right._j_table))
    return Table(self._j_table.join(right._j_table, join_predicate))
def left_outer_join(self, right, join_predicate=None):
    """
    Left-outer-joins this table with another, like a SQL LEFT OUTER JOIN.
    Field names of the two inputs must not overlap — rename with
    :func:`~pyflink.table.Table.alias` first if necessary.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`
        and null check must be enabled in its :class:`TableConfig`
        (the default).

    Example:
    ::

        >>> left.left_outer_join(right).select("a, b, d")
        >>> left.left_outer_join(right, "a = b").select("a, b, d")

    :param right: Right table.
    :param join_predicate: Optional, the join predicate expression string.
    :return: The result :class:`Table`.
    """
    if join_predicate is not None:
        return Table(self._j_table.leftOuterJoin(right._j_table, join_predicate))
    return Table(self._j_table.leftOuterJoin(right._j_table))
def right_outer_join(self, right, join_predicate):
    """
    Right-outer-joins this table with another, like a SQL RIGHT OUTER
    JOIN. Field names of the two inputs must not overlap — rename with
    :func:`~pyflink.table.Table.alias` first if necessary.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`
        and null check must be enabled in its :class:`TableConfig`
        (the default).

    Example:
    ::

        >>> left.right_outer_join(right, "a = b").select("a, b, d")

    :param right: Right table.
    :param join_predicate: The join predicate expression string.
    :return: The result :class:`Table`.
    """
    j_joined = self._j_table.rightOuterJoin(right._j_table, join_predicate)
    return Table(j_joined)
def full_outer_join(self, right, join_predicate):
    """
    Full-outer-joins this table with another, like a SQL FULL OUTER JOIN.
    Field names of the two inputs must not overlap — rename with
    :func:`~pyflink.table.Table.alias` first if necessary.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`
        and null check must be enabled in its :class:`TableConfig`
        (the default).

    Example:
    ::

        >>> left.full_outer_join(right, "a = b").select("a, b, d")

    :param right: Right table.
    :param join_predicate: The join predicate expression string.
    :return: The result :class:`Table`.
    """
    j_joined = self._j_table.fullOuterJoin(right._j_table, join_predicate)
    return Table(j_joined)
def join_lateral(self, table_function_call, join_predicate=None):
    """
    Inner-joins this table with a user-defined TableFunction: each row is
    joined with the rows the table function produces for it.

    Example:
    ::

        >>> t_env.register_java_function("split", "java.table.function.class.name")
        >>> tab.join_lateral("split(text, ' ') as (b)", "a = b")

    :param table_function_call: An expression representing a table function call.
    :type table_function_call: str
    :param join_predicate: Optional, the join predicate expression string;
                           joins ON TRUE when omitted.
    :type join_predicate: str
    :return: The result Table.
    :rtype: Table
    """
    if join_predicate is not None:
        return Table(self._j_table.joinLateral(table_function_call, join_predicate))
    return Table(self._j_table.joinLateral(table_function_call))
def left_outer_join_lateral(self, table_function_call, join_predicate=None):
    """
    Left-outer-joins this table with a user-defined TableFunction: each
    row is joined with the rows the table function produces for it; when
    the function yields nothing, the outer row is padded with nulls.

    Example:
    ::

        >>> t_env.register_java_function("split", "java.table.function.class.name")
        >>> tab.left_outer_join_lateral("split(text, ' ') as (b)")

    :param table_function_call: An expression representing a table function call.
    :type table_function_call: str
    :param join_predicate: Optional, the join predicate expression string;
                           joins ON TRUE when omitted.
    :type join_predicate: str
    :return: The result Table.
    :rtype: Table
    """
    if join_predicate is not None:
        return Table(self._j_table.leftOuterJoinLateral(table_function_call, join_predicate))
    return Table(self._j_table.leftOuterJoinLateral(table_function_call))
def minus(self, right):
    """
    Returns rows of this table that do not exist in ``right``, with
    duplicates removed — like a SQL EXCEPT clause. Both tables must have
    identical field types.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.minus(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.minus(right._j_table)
    return Table(j_result)
def minus_all(self, right):
    """
    Returns rows of this table that do not exist in ``right``, keeping
    multiplicity — like a SQL EXCEPT ALL clause: a row present n times on
    the left and m times on the right appears (n - m) times. Both tables
    must have identical field types.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.minus_all(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.minusAll(right._j_table)
    return Table(j_result)
def union(self, right):
    """
    Unions two tables, removing duplicate rows — like a SQL UNION. The
    fields of the two inputs must fully overlap.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.union(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.union(right._j_table)
    return Table(j_result)
def union_all(self, right):
    """
    Unions two tables, keeping duplicate rows — like a SQL UNION ALL. The
    fields of the two inputs must fully overlap.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.union_all(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.unionAll(right._j_table)
    return Table(j_result)
def intersect(self, right):
    """
    Returns rows present in both tables, each at most once — like a SQL
    INTERSECT. The fields of the two inputs must fully overlap.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.intersect(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.intersect(right._j_table)
    return Table(j_result)
def intersect_all(self, right):
    """
    Returns rows present in both tables, keeping multiplicity — like a SQL
    INTERSECT ALL: a row appears as many times as it occurs in both
    inputs. The fields of the two inputs must fully overlap.

    .. note::

        Both tables must be bound to the same :class:`TableEnvironment`.

    Example:
    ::

        >>> left.intersect_all(right)

    :param right: Right table.
    :return: The result :class:`Table`.
    """
    j_result = self._j_table.intersectAll(right._j_table)
    return Table(j_result)
def order_by(self, fields):
    """Sort this :class:`Table`, globally across all parallel partitions.

    Similar to SQL ORDER BY.

    Example:
    ::

        >>> tab.order_by("name.desc")

    :param fields: Order fields expression string.
    :return: The result :class:`Table`.
    """
    j_sorted = self._j_table.orderBy(fields)
    return Table(j_sorted)
def offset(self, offset):
    """Skip the first *offset* rows of a sorted result.

    Similar to a SQL OFFSET clause. Offset is technically part of the
    Order By operator and thus must be preceded by
    :func:`~pyflink.table.Table.order_by`. It can be combined with a
    subsequent :func:`~pyflink.table.Table.fetch` call to return n rows
    after skipping the first o rows.

    Example:
    ::

        # skips the first 3 rows and returns all following rows.
        >>> tab.order_by("name.desc").offset(3)
        # skips the first 10 rows and returns the next 5 rows.
        >>> tab.order_by("name.desc").offset(10).fetch(5)

    :param offset: Number of records to skip.
    :return: The result :class:`Table`.
    """
    j_offset = self._j_table.offset(offset)
    return Table(j_offset)
def fetch(self, fetch):
    """
    Limits a sorted result to the first n rows.
    Similar to a SQL FETCH clause. Fetch is technically part of the Order By operator and
    thus must be preceded by it.
    :func:`~pyflink.table.Table.fetch` can be combined with a preceding
    :func:`~pyflink.table.Table.offset` call to return n rows after skipping the first o rows.
    Example:
    Returns the first 3 records.
    ::
        >>> tab.order_by("name.desc").fetch(3)
    Skips the first 10 rows and returns the next 5 rows.
    ::
        >>> tab.order_by("name.desc").offset(10).fetch(5)
    :param fetch: The number of records to return. Fetch must be >= 0.
    :return: The result :class:`Table`.
    """
    return Table(self._j_table.fetch(fetch))
def window(self, window):
    # type: (GroupWindow) -> GroupWindowedTable
    """Define a group window on the records of this table.

    A group window groups the records of a table by assigning them to
    windows defined by a time or row interval.

    For streaming tables of infinite size, grouping into windows is required
    to define finite groups on which group-based aggregates can be computed.
    For batch tables of finite size, windowing essentially provides shortcuts
    for time-based groupBy.

    .. note::
        Computing windowed aggregates on a streaming table is only a parallel
        operation if additional grouping attributes are added to the
        :func:`~pyflink.table.GroupWindowedTable.group_by` clause.
        If the :func:`~pyflink.table.GroupWindowedTable.group_by` only
        references a GroupWindow alias, the streamed table will be processed
        by a single task, i.e., with parallelism 1.

    Example:
    ::

        >>> tab.window(Tumble.over("10.minutes").on("rowtime").alias("w")) \\
        ...    .group_by("w") \\
        ...    .select("a.sum as a, w.start as b, w.end as c, w.rowtime as d")

    :param window: A :class:`pyflink.table.window.GroupWindow` created from
                   :class:`pyflink.table.window.Tumble`,
                   :class:`pyflink.table.window.Session`
                   or :class:`pyflink.table.window.Slide`.
    :return: A :class:`GroupWindowedTable`.
    """
    j_windowed = self._j_table.window(window._java_window)
    return GroupWindowedTable(j_windowed)
def over_window(self, *over_windows):
    """
    Defines over-windows on the records of a table.
    An over-window defines for each record an interval of records over which aggregation
    functions can be computed.
    Example:
    ::
        >>> table.over_window(Over.partition_by("c").order_by("rowTime") \\
        ...                      .preceding("10.seconds").alias("ow")) \\
        ...      .select("c, b.count over ow, e.sum over ow")
    .. note::
        Computing over window aggregates on a streaming table is only a parallel
        operation if the window is partitioned. Otherwise, the whole stream will be processed
        by a single task, i.e., with parallelism 1.
    .. note::
        Over-windows for batch tables are currently not supported.
    :param over_windows: :class:`OverWindow` instances created from :class:`Over`.
    :return: A :class:`OverWindowedTable`.
    """
    gateway = get_gateway()
    # Wrap the Python OverWindow objects into a Java OverWindow[] before
    # delegating to the underlying Java Table API.
    window_array = to_jarray(gateway.jvm.OverWindow,
                             [item._java_over_window for item in over_windows])
    return OverWindowedTable(self._j_table.window(window_array))
def add_columns(self, fields):
    """Add additional columns, similar to a SQL SELECT statement.

    The field expressions can contain complex expressions but no
    aggregations. An exception is thrown if an added field already exists.

    Example:
    ::

        >>> tab.add_columns("a + 1 as a1, concat(b, 'sunny') as b1")

    :param fields: Column list string.
    :return: The result :class:`Table`.
    """
    j_extended = self._j_table.addColumns(fields)
    return Table(j_extended)
def add_or_replace_columns(self, fields):
    """Add columns, replacing any existing column of the same name.

    Similar to a SQL SELECT statement. The field expressions can contain
    complex expressions but no aggregations. If several added fields share
    the same name, the last one wins.

    Example:
    ::

        >>> tab.add_or_replace_columns("a + 1 as a1, concat(b, 'sunny') as b1")

    :param fields: Column list string.
    :return: The result :class:`Table`.
    """
    j_extended = self._j_table.addOrReplaceColumns(fields)
    return Table(j_extended)
def rename_columns(self, fields):
    """Rename existing columns, similar to a field alias statement.

    The field expressions should be alias expressions, and only existing
    fields can be renamed.

    Example:
    ::

        >>> tab.rename_columns("a as a1, b as b1")

    :param fields: Column list string.
    :return: The result :class:`Table`.
    """
    j_renamed = self._j_table.renameColumns(fields)
    return Table(j_renamed)
def drop_columns(self, fields):
    """Drop existing columns.

    The field expressions should be field reference expressions.

    Example:
    ::

        >>> tab.drop_columns("a, b")

    :param fields: Column list string.
    :return: The result :class:`Table`.
    """
    j_reduced = self._j_table.dropColumns(fields)
    return Table(j_reduced)
def insert_into(self, table_path, *table_path_continued):
    """Write this :class:`Table` to a :class:`TableSink` registered under
    the given name.

    For the path resolution algorithm see :func:`~TableEnvironment.use_database`.

    Example:
    ::

        >>> tab.insert_into("sink")

    :param table_path: First part of the registered :class:`TableSink` path;
                       at least the sink name must be provided.
    :param table_path_continued: Remaining parts of the registered
                                 :class:`TableSink` path.
    """
    gateway = get_gateway()
    # The Java API takes the remaining path components as a String[].
    remaining_path = to_jarray(gateway.jvm.String, table_path_continued)
    self._j_table.insertInto(table_path, remaining_path)
def get_schema(self):
    """Return the :class:`TableSchema` of this table.

    :return: The schema of this table.
    """
    j_schema = self._j_table.getSchema()
    return TableSchema(j_table_schema=j_schema)
def print_schema(self):
    """
    Prints the schema of this table to the console in a tree format.
    """
    # Delegates directly to the Java table; output goes to stdout.
    self._j_table.printSchema()
def __str__(self):
    # Delegate to the underlying Java table's string representation.
    return self._j_table.toString()
class GroupedTable(object):
    """A table that has been grouped on a set of grouping keys."""

    def __init__(self, java_table):
        # Handle to the underlying Java GroupedTable.
        self._j_table = java_table

    def select(self, fields):
        """Perform a selection on the grouped table (SQL SELECT ... GROUP BY).

        The field expressions can contain complex expressions and
        aggregations.

        Example:
        ::

            >>> tab.group_by("key").select("key, value.avg + ' The average' as average")

        :param fields: Expression string that contains group keys and
                       aggregate function calls.
        :return: The result :class:`Table`.
        """
        j_selected = self._j_table.select(fields)
        return Table(j_selected)
class GroupWindowedTable(object):
    """A table that has been windowed for :class:`pyflink.table.window.GroupWindow`."""

    def __init__(self, java_group_windowed_table):
        # Handle to the underlying Java GroupWindowedTable.
        self._j_table = java_group_windowed_table

    def group_by(self, fields):
        """Group the elements by a mandatory window and optional grouping keys.

        The window is specified by referring to its alias. If no additional
        grouping attribute is specified and the input is a streaming table,
        the aggregation runs in a single task, i.e. with parallelism 1.
        Aggregations are performed per group and defined by a subsequent
        :func:`~pyflink.table.WindowGroupedTable.select` clause, similar to a
        SQL SELECT-GROUP-BY query.

        Example:
        ::

            >>> tab.window(group_window.alias("w")).group_by("w, key").select("key, value.avg")

        :param fields: Group keys.
        :return: A :class:`WindowGroupedTable`.
        """
        j_grouped = self._j_table.groupBy(fields)
        return WindowGroupedTable(j_grouped)
class WindowGroupedTable(object):
    """A table that has been windowed and grouped for
    :class:`pyflink.table.window.GroupWindow`."""

    def __init__(self, java_window_grouped_table):
        # Handle to the underlying Java WindowGroupedTable.
        self._j_table = java_window_grouped_table

    def select(self, fields):
        """Perform a selection on a window-grouped table.

        Similar to an SQL SELECT statement. The field expressions can contain
        complex expressions and aggregations.

        Example:
        ::

            >>> window_grouped_table.select("key, window.start, value.avg as valavg")

        :param fields: Expression string.
        :return: The result :class:`Table`.
        """
        j_selected = self._j_table.select(fields)
        return Table(j_selected)
class OverWindowedTable(object):
    """A table that has been windowed for :class:`pyflink.table.window.OverWindow`.

    Unlike group windows, which are specified in the GROUP BY clause, over
    windows do not collapse rows. Instead, over window aggregates compute an
    aggregate for each input row over a range of its neighboring rows.
    """

    def __init__(self, java_over_windowed_table):
        # Handle to the underlying Java OverWindowedTable.
        self._j_table = java_over_windowed_table

    def select(self, fields):
        """Perform a selection on an over-windowed table.

        Similar to an SQL SELECT statement. The field expressions can contain
        complex expressions and aggregations.

        Example:
        ::

            >>> over_windowed_table.select("c, b.count over ow, e.sum over ow")

        :param fields: Expression string.
        :return: The result :class:`Table`.
        """
        j_selected = self._j_table.select(fields)
        return Table(j_selected)
| apache-2.0 |
spinellic/Mission-Planner | Lib/nturl2path.py | 60 | 2437 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
# e.g.
# ///C|/foo/bar/spam.foo
# becomes
# C:\foo\bar\spam.foo
import string, urllib
# Windows itself uses ":" even in URLs.
url = url.replace(':', '|')
if not '|' in url:
# No drive specifier, just convert slashes
if url[:4] == '////':
# path is something like ////host/path/on/remote/host
# convert this to \\host\path\on\remote\host
# (notice halving of slashes at the start of the path)
url = url[2:]
components = url.split('/')
# make sure not to convert quoted slashes :-)
return urllib.unquote('\\'.join(components))
comp = url.split('|')
if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
error = 'Bad URL: ' + url
raise IOError, error
drive = comp[0][-1].upper()
path = drive + ':'
components = comp[1].split('/')
for comp in components:
if comp:
path = path + '\\' + urllib.unquote(comp)
# Issue #11474: url like '/C|/' should convert into 'C:\\'
if path.endswith(':') and url.endswith('/'):
path += '\\'
return path
def pathname2url(p):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
# e.g.
# C:\foo\bar\spam.foo
# becomes
# ///C|/foo/bar/spam.foo
import urllib
if not ':' in p:
# No drive specifier, just convert slashes and quote the name
if p[:2] == '\\\\':
# path is something like \\host\path\on\remote\host
# convert this to ////host/path/on/remote/host
# (notice doubling of slashes at the start of the path)
p = '\\\\' + p
components = p.split('\\')
return urllib.quote('/'.join(components))
comp = p.split(':')
if len(comp) != 2 or len(comp[0]) > 1:
error = 'Bad path: ' + p
raise IOError, error
drive = urllib.quote(comp[0].upper())
components = comp[1].split('\\')
path = '///' + drive + ':'
for comp in components:
if comp:
path = path + '/' + urllib.quote(comp)
return path
| gpl-3.0 |
Jun1113/MapReduce-Example | contrib/hod/hodlib/Common/nodepoolutil.py | 182 | 1113 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from hodlib.NodePools.torque import TorquePool
class NodePoolUtil:
    """Factory helper for creating concrete NodePool implementations."""

    @staticmethod
    def getNodePool(nodePoolDesc, cfg, log):
        """returns a concrete instance of NodePool as configured by 'cfg'"""
        # Only the 'torque' pool type is supported; any other name falls
        # through and implicitly returns None.
        if nodePoolDesc.getName() == 'torque':
            return TorquePool(nodePoolDesc, cfg, log)
| apache-2.0 |
lantianlz/zx | www/admin/views_toutiao_type.py | 1 | 2396 | # -*- coding: utf-8 -*-
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from www.misc.decorators import staff_required, common_ajax_response, verify_permission
from www.misc import qiniu_client
from common import utils, page
from www.toutiao.interface import ArticleTypeBase
@verify_permission('')
def toutiao_type(request, template_name='admin/toutiao_type.html'):
    """Render the toutiao (headline) article-type admin page."""
    return render_to_response(template_name, locals(),
                              context_instance=RequestContext(request))
@verify_permission('add_toutiao_type')
@common_ajax_response
def add_type(request):
    """Create a new toutiao article type from the request parameters."""
    name = request.REQUEST.get('name')
    domain = request.REQUEST.get('domain')
    sort_num = request.REQUEST.get('sort_num')
    code, msg = ArticleTypeBase().add_article_type(name, domain, sort_num)
    # On failure `msg` is an error message; on success it is the created
    # object, of which only the id is exposed to the ajax response.
    if code:
        return code, msg
    return code, msg.id
def format_type(objs, num):
    """Serialize article-type objects into plain dicts for JSON output.

    :param objs: iterable of objects exposing id/name/domain/sort_num/state.
    :param num: running row-number offset; rows are numbered num+1, num+2, ...
    :return: list of dicts, one per object, in input order.
    """
    # enumerate(start=num + 1) replaces the original manual counter increment.
    return [{
        'num': row_num,
        'type_id': x.id,
        'name': x.name,
        'domain': x.domain,
        'sort_num': x.sort_num,
        'state': x.state,
    } for row_num, x in enumerate(objs, num + 1)]
@verify_permission('query_toutiao_type')
def search(request):
    """Return all article types as a JSON payload.

    Fix: removed the redundant `data = []` dead store that was immediately
    overwritten by the format_type() result.
    """
    data = format_type(ArticleTypeBase().get_article_types(), 0)
    return HttpResponse(
        json.dumps({'data': data}),
        mimetype='application/json'
    )
@verify_permission('query_toutiao_type')
def get_type_by_id(request):
    """Return a single article type, looked up by id, as JSON."""
    type_id = request.REQUEST.get('type_id')
    article_type = ArticleTypeBase().get_type_by_id(type_id)
    payload = format_type([article_type], 1)[0]
    return HttpResponse(json.dumps(payload), mimetype='application/json')
@verify_permission('modify_toutiao_type')
@common_ajax_response
def modify_type(request):
    """Update an existing article type from the request parameters."""
    type_id = request.REQUEST.get('type_id')
    name = request.REQUEST.get('name')
    domain = request.REQUEST.get('domain')
    # The state checkbox arrives as the string "1" when enabled; compare
    # directly instead of the redundant `True if ... else False`.
    state = request.REQUEST.get('state') == "1"
    sort_num = int(request.REQUEST.get('sort_num'))
    return ArticleTypeBase().modify_article_type(
        type_id, name=name, sort_num=sort_num, domain=domain, state=state
    )
| gpl-2.0 |
bpsinc-native/src_third_party_scons-2.0.1 | engine/SCons/Tool/packaging/src_targz.py | 61 | 1746 | """SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_targz.py 5134 2010/08/16 23:02:40 bdeegan"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
    """Build a gzip-compressed source tarball via the SCons Tar builder."""
    tar_builder = env['BUILDERS']['Tar']
    tar_builder.set_suffix('.tar.gz')
    # Relocate the sources under the package root without honoring any
    # install locations (this is a source package).
    target, source = putintopackageroot(target, source, env, PACKAGEROOT,
                                        honor_install_location=0)
    return tar_builder(env, target, source, TARFLAGS='-zc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
yaroslav-tarasov/avango | attic/avango-vrpn/python/avango/vrpn/__init__.py | 6 | 1857 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of Avango. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# Avango is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# Avango is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with Avango. If not, see <http://www.gnu.org/licenses/>. #
# #
# Avango is a trademark owned by FhG. #
# #
##########################################################################
from _nodes import *
from _vrpn import *
from _dtrack_device import *
from _dtrack_target import *
#import avango.nodefactory
#nodes = avango.nodefactory.NodeFactory('av::vrpn::')
| lgpl-3.0 |
Sing-Li/go-buildpack | builds/runtimes/python-2.7.6/lib/python2.7/test/test_userlist.py | 136 | 1894 | # Check every path through every method of UserList
from UserList import UserList
from test import test_support, list_tests
class UserListTest(list_tests.CommonTest):
    # Run UserList through the shared list test-suite, plus the
    # UserList-specific behaviours checked below.
    type2test = UserList

    def test_getslice(self):
        # Extend the common slice tests with explicit boundary checks
        # against a plain list for every combination of start/stop.
        super(UserListTest, self).test_getslice()
        l = [0, 1, 2, 3, 4]
        u = self.type2test(l)
        for i in range(-3, 6):
            self.assertEqual(u[:i], l[:i])
            self.assertEqual(u[i:], l[i:])
            for j in xrange(-3, 6):
                self.assertEqual(u[i:j], l[i:j])

    def test_add_specials(self):
        # UserList + iterable should concatenate element-wise.
        u = UserList("spam")
        u2 = u + "eggs"
        self.assertEqual(u2, list("spameggs"))

    def test_radd_specials(self):
        # iterable + UserList goes through __radd__.
        u = UserList("eggs")
        u2 = "spam" + u
        self.assertEqual(u2, list("spameggs"))
        u2 = u.__radd__(UserList("spam"))
        self.assertEqual(u2, list("spameggs"))

    def test_iadd(self):
        # list += UserList extends the plain list in place.
        super(UserListTest, self).test_iadd()
        u = [0, 1]
        u += UserList([0, 1])
        self.assertEqual(u, [0, 1, 0, 1])

    def test_mixedcmp(self):
        # Comparisons between UserList and plain lists must work both ways.
        u = self.type2test([0, 1])
        self.assertEqual(u, [0, 1])
        self.assertNotEqual(u, [0])
        self.assertNotEqual(u, [0, 2])

    def test_mixedadd(self):
        # Adding plain lists to a UserList yields the concatenation.
        u = self.type2test([0, 1])
        self.assertEqual(u + [], u)
        self.assertEqual(u + [2], [0, 1, 2])

    def test_getitemoverwriteiter(self):
        # Verify that __getitem__ overrides *are* recognized by __iter__
        class T(self.type2test):
            def __getitem__(self, key):
                return str(key) + '!!!'
        self.assertEqual(iter(T((1,2))).next(), "0!!!")
def test_main():
    # Suppress the -3 (Py3k) deprecation warnings triggered by the
    # __getslice__/__setslice__/__delslice__ paths the suite exercises.
    with test_support.check_py3k_warnings(
            (".+__(get|set|del)slice__ has been removed", DeprecationWarning)):
        test_support.run_unittest(UserListTest)

if __name__ == "__main__":
    test_main()
| mit |
azumimuo/family-xbmc-addon | plugin.video.salts/scrapers/premiumize_scraper.py | 1 | 9013 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'https://www.premiumize.me'
VIDEO_EXT = ['MKV', 'AVI', 'MP4']
class Premiumize_Scraper(scraper.Scraper):
    """SALTS scraper for the premiumize.me torrent cloud service."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
        self.username = kodi.get_setting('%s-username' % (self.get_name()))
        self.password = kodi.get_setting('%s-password' % (self.get_name()))

    @classmethod
    def provides(cls):
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'Premiumize.me'

    def resolve_link(self, link):
        # premiumize links are already direct
        return link

    def format_source_label(self, item):
        """Build the display label '[quality] host (size) [extra]'."""
        label = '[%s] %s' % (item['quality'], item['host'])
        if 'size' in item:
            label += ' (%s)' % (item['size'])
        if 'extra' in item:
            label += ' [%s]' % (item['extra'])
        return label

    def get_sources(self, video):
        """Return hoster dicts for all playable files of the matched torrent.

        :param video: the ScraperVideo being searched for.
        """
        hosters = []
        source_url = self.get_url(video)
        if source_url and source_url != FORCE_NO_MATCH:
            query = urlparse.parse_qs(source_url)
            if 'hash' in query:
                data = {'hash': query['hash'][0]}
                url = urlparse.urljoin(self.base_url, '/torrent/browse')
                js_data = self._http_get(url, data=data, cache_limit=1)
                if 'data' in js_data and 'content' in js_data['data']:
                    # Bug fix: use a distinct loop variable instead of
                    # rebinding the `video` parameter.
                    sources = self.__get_videos(js_data['data']['content'], video)
                    for source in sources:
                        host = self._get_direct_hostname(source['url'])
                        hoster = {'multi-part': False, 'class': self, 'views': None, 'url': source['url'],
                                  'rating': None, 'host': host, 'quality': source['quality'], 'direct': True}
                        if 'size' in source: hoster['size'] = scraper_utils.format_size(source['size'])
                        if 'name' in source: hoster['extra'] = source['name']
                        hosters.append(hoster)
        return hosters

    def __get_videos(self, contents, video):
        """Recursively collect playable video files from a torrent listing.

        :param contents: dict of file/directory entries from /torrent/browse.
        :param video: the requested ScraperVideo (used only to pick the
                      filename parser for movies vs. episodes).
        :return: list of dicts with name/size/url/quality keys.
        """
        videos = []
        for key in contents:
            item = contents[key]
            if item['type'].lower() == 'dir':
                videos += self.__get_videos(item['children'], video)
            elif item['ext'].upper() in VIDEO_EXT and int(item['size']) > (100 * 1024 * 1024):
                if video.video_type == VIDEO_TYPES.MOVIE:
                    _, _, height, _ = scraper_utils.parse_movie_link(item['name'])
                else:
                    _, _, _, height, _ = scraper_utils.parse_episode_link(item['name'])
                # Bug fix: the original rebound the `video` parameter to the
                # result dict here, so the next iteration (and recursion)
                # called .video_type on a dict and multi-file torrents broke.
                videos.append({'name': item['name'], 'size': item['size'], 'url': item['url'],
                               'quality': scraper_utils.height_get_quality(height)})
                if item['stream'] is not None:
                    # the transcoded stream is capped at 720p
                    if int(height) > 720: height = 720
                    videos.append({'name': '(Transcode) %s' % (item['name']), 'url': item['stream'],
                                   'quality': scraper_utils.height_get_quality(height)})
        return videos

    def get_url(self, video):
        """Return the cached related url for `video`, searching if needed."""
        url = None
        self.create_db_connection()
        result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
        if result:
            url = result[0][0]
            log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
        else:
            if video.video_type == VIDEO_TYPES.MOVIE:
                results = self.search(video.video_type, video.title, video.year)
                if results:
                    url = results[0]['url']
                    self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
            else:
                url = self._get_episode_url(video)
                if url:
                    self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url, video.season, video.episode)
        return url

    def _get_episode_url(self, video):
        """Find the torrent hash matching an episode by SxxExx or airdate."""
        url = urlparse.urljoin(self.base_url, '/torrent/list')
        js_data = self._http_get(url, cache_limit=0)
        norm_title = scraper_utils.normalize_title(video.title)
        if 'torrents' in js_data:
            airdate_fallback = kodi.get_setting('airdate-fallback') == 'true' and video.ep_airdate
            show_title = ''
            if not scraper_utils.force_title(video):
                for item in js_data['torrents']:
                    sxe_pattern = '(.*?)[. ][Ss]%02d[Ee]%02d[. ]' % (int(video.season), int(video.episode))
                    match = re.search(sxe_pattern, item['name'])
                    if match:
                        show_title = match.group(1)
                    elif airdate_fallback:
                        # fall back to matching on the episode airdate
                        airdate_pattern = '(.*?)[. ]%s[. ]%02d[. ]%02d[. ]' % (video.ep_airdate.year, video.ep_airdate.month, video.ep_airdate.day)
                        match = re.search(airdate_pattern, item['name'])
                        if match:
                            show_title = match.group(1)
                    if show_title and norm_title in scraper_utils.normalize_title(show_title):
                        return 'hash=%s' % (item['hash'])

    def search(self, video_type, title, year):
        """Search the user's cloud torrents for a movie title/year."""
        url = urlparse.urljoin(self.base_url, '/torrent/list')
        js_data = self._http_get(url, cache_limit=0)
        norm_title = scraper_utils.normalize_title(title)
        results = []
        if 'torrents' in js_data:
            for item in js_data['torrents']:
                if re.search('[._ ]S\d+E\d+[._ ]', item['name']): continue  # skip episodes for movies
                match = re.search('(.*?)\(?(\d{4})\)?(.*)', item['name'])
                if match:
                    match_title, match_year, extra = match.groups()
                else:
                    match_title, match_year, extra = item['name'], '', ''
                match_title = match_title.strip()
                extra = extra.strip()
                if norm_title in scraper_utils.normalize_title(match_title) and (not year or not match_year or year == match_year):
                    result_title = match_title
                    if extra: result_title += ' [%s]' % (extra)
                    result = {'title': result_title, 'year': match_year, 'url': 'hash=%s' % (item['hash'])}
                    results.append(result)
        return results

    @classmethod
    def get_settings(cls):
        """Extend the base settings XML with username/password fields."""
        settings = super(cls, cls).get_settings()
        settings = scraper_utils.disable_sub_check(settings)
        name = cls.get_name()
        settings.append('         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>' % (name, i18n('username')))
        settings.append('         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>' % (name, i18n('password')))
        return settings

    def _http_get(self, url, data=None, retry=True, allow_redirect=True, cache_limit=8):
        """JSON GET/POST with the account credentials attached.

        Returns {} when credentials are missing or the API reports an error.
        """
        if not self.username or not self.password:
            return {}
        if data is None: data = {}
        data.update({'customer_id': self.username, 'pin': self.password})
        result = super(Premiumize_Scraper, self)._http_get(url, data=data, allow_redirect=allow_redirect, cache_limit=cache_limit)
        js_result = scraper_utils.parse_json(result, url)
        if 'status' in js_result and js_result['status'] == 'error':
            log_utils.log('Error received from premiumize.me (%s)' % (js_result.get('message', 'Unknown Error')), log_utils.LOGWARNING)
            js_result = {}
        return js_result
| gpl-2.0 |
pattisdr/osf.io | osf_tests/test_analytics.py | 3 | 6065 | # -*- coding: utf-8 -*-
"""
Unit tests for analytics logic in framework/analytics/__init__.py
"""
import mock
import re
import pytest
from django.utils import timezone
from nose.tools import * # noqa: F403
from datetime import datetime
from addons.osfstorage.models import OsfStorageFile
from framework import analytics
from osf.models import PageCounter
from tests.base import OsfTestCase
from osf_tests.factories import UserFactory, ProjectFactory
class TestAnalytics(OsfTestCase):
    # Unit tests for the user-activity counters in framework.analytics.

    def test_get_total_activity_count(self):
        user = UserFactory()
        date = timezone.now()
        # A fresh user starts at zero, and the helper must agree with the
        # model-level accessor before and after an increment.
        assert_equal(analytics.get_total_activity_count(user._id), 0)
        assert_equal(analytics.get_total_activity_count(user._id), user.get_activity_points())

        analytics.increment_user_activity_counters(user._id, 'project_created', date.isoformat())

        assert_equal(analytics.get_total_activity_count(user._id), 1)
        assert_equal(analytics.get_total_activity_count(user._id), user.get_activity_points())

    def test_increment_user_activity_counters(self):
        user = UserFactory()
        date = timezone.now()

        assert_equal(user.get_activity_points(), 0)
        # One 'project_created' event yields exactly one activity point.
        analytics.increment_user_activity_counters(user._id, 'project_created', date.isoformat())
        assert_equal(user.get_activity_points(), 1)
@pytest.fixture()
def user():
    # Plain OSF user; used as the project creator below.
    return UserFactory()


@pytest.fixture()
def project(user):
    # Project owned by the `user` fixture.
    return ProjectFactory(creator=user)


@pytest.fixture()
def file_node(project):
    # First OsfStorage file attached to the project.
    file_node = OsfStorageFile(name='test', target=project)
    file_node.save()
    return file_node


@pytest.fixture()
def file_node2(project):
    # Second OsfStorage file, for multi-file download totals.
    file_node2 = OsfStorageFile(name='test2', target=project)
    file_node2.save()
    return file_node2


@pytest.fixture()
def file_node3(project):
    # Third OsfStorage file, used for the per-version counter case.
    file_node3 = OsfStorageFile(name='test3', target=project)
    file_node3.save()
    return file_node3
@pytest.fixture()
def page_counter(project, file_node):
    # Download counter for file_node: 41 total / 33 unique on 2018-02-04.
    page_counter_id = 'download:{}:{}'.format(project._id, file_node.id)
    page_counter, created = PageCounter.objects.get_or_create(_id=page_counter_id, date={u'2018/02/04': {u'total': 41, u'unique': 33}})
    return page_counter


@pytest.fixture()
def page_counter2(project, file_node2):
    # Download counter for file_node2: 4 total / 26 unique on 2018-02-04.
    page_counter_id = 'download:{}:{}'.format(project._id, file_node2.id)
    page_counter, created = PageCounter.objects.get_or_create(_id=page_counter_id, date={u'2018/02/04': {u'total': 4, u'unique': 26}})
    return page_counter


@pytest.fixture()
def page_counter_for_individual_version(project, file_node3):
    # Per-version counter (':0' suffix); must NOT be added to daily totals,
    # since the all-versions counter already includes it.
    page_counter_id = 'download:{}:{}:0'.format(project._id, file_node3.id)
    page_counter, created = PageCounter.objects.get_or_create(_id=page_counter_id, date={u'2018/02/04': {u'total': 1, u'unique': 1}})
    return page_counter
@pytest.mark.django_db
class TestPageCounter:
    # Behavioural tests for PageCounter download tallies.

    @mock.patch('osf.models.analytics.session')
    def test_download_update_counter(self, mock_session, project, file_node):
        # Anonymous session: every download bumps total; unique only once.
        mock_session.data = {}
        page_counter_id = 'download:{}:{}'.format(project._id, file_node.id)
        PageCounter.update_counter(page_counter_id, {})
        page_counter = PageCounter.objects.get(_id=page_counter_id)
        assert page_counter.total == 1
        assert page_counter.unique == 1

        PageCounter.update_counter(page_counter_id, {})
        page_counter.refresh_from_db()
        assert page_counter.total == 2
        assert page_counter.unique == 1

    @mock.patch('osf.models.analytics.session')
    def test_download_update_counter_contributor(self, mock_session, user, project, file_node):
        # Downloads by a project contributor must not be counted at all.
        mock_session.data = {'auth_user_id': user._id}
        page_counter_id = 'download:{}:{}'.format(project._id, file_node.id)
        PageCounter.update_counter(page_counter_id, {'contributors': project.contributors})
        page_counter = PageCounter.objects.get(_id=page_counter_id)
        assert page_counter.total == 0
        assert page_counter.unique == 0

        PageCounter.update_counter(page_counter_id, {'contributors': project.contributors})
        page_counter.refresh_from_db()
        assert page_counter.total == 0
        assert page_counter.unique == 0

    def test_get_all_downloads_on_date(self, page_counter, page_counter2):
        """
        This method tests that multiple pagecounter objects have their download totals summed properly.
        :param page_counter: represents a page_counter for a file node being downloaded
        :param page_counter2: represents a page_counter for another file node being downloaded
        """
        date = datetime(2018, 2, 4)
        total_downloads = PageCounter.get_all_downloads_on_date(date)
        # 41 (page_counter) + 4 (page_counter2) == 45
        assert total_downloads == 45

    def test_get_all_downloads_on_date_exclude_versions(self, page_counter, page_counter2, page_counter_for_individual_version):
        """
        This method tests that individual version counts for file node's aren't "double counted" in the totals
        for a page counter. We don't add the file node's total to the versions total.
        :param page_counter: represents a page_counter for a file node being downloaded
        :param page_counter2: represents a page_counter for another file node being downloaded
        """
        date = datetime(2018, 2, 4)
        total_downloads = PageCounter.get_all_downloads_on_date(date)
        # Still 45: the per-version counter must be excluded from the sum.
        assert total_downloads == 45
class TestPageCounterRegex:

    def test_download_all_versions_regex(self):
        """The all-versions pattern matches only un-versioned download ids."""
        pattern = PageCounter.DOWNLOAD_ALL_VERSIONS_ID_PATTERN
        # Garbage, view ids, and version-specific download ids are rejected...
        for rejected_id in ('bad id',
                            'views:guid1:fileid',
                            'download:guid1:fileid:0'):
            assert not re.match(pattern, rejected_id)
        # ...while the bare (all-versions) download id is accepted.
        assert re.match(pattern, 'download:guid1:fileid')
| apache-2.0 |
jymannob/CouchPotatoServer | couchpotato/core/helpers/namer_check.py | 6 | 5142 | #Namer Check routine by sarakha63
from xml.dom.minidom import parseString
from xml.dom.minidom import Node
import cookielib
import urllib
import urllib2
import re
import time
from datetime import datetime
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import getTitle, mergeDicts
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from dateutil.parser import parse
from guessit import guess_movie_info
from couchpotato.core.event import fireEvent
log = CPLog(__name__)
# Scene-tag scrubber: matches one separator character, a known release tag
# (language/source/codec/resolution/etc.) and a trailing separator or end of
# string; used with re.sub() to blank those tags out of release names.
clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|by|ioaw|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|full|multi|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
# Patterns recognising multi-part releases (cd1 / dvd2 / part3 / disk a ...);
# each pattern captures the part identifier.
multipart_regex = [
    '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
    '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
    '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1
    '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1
    'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext
    'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext
    'part[ _\.-]*([0-9a-d]+)$', #part1.mkv
    'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv
    '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
    '([a-z])([0-9]+)(\.....?)$',
    '()([ab])(\.....?)$' #*a.mkv
]
def correctName(check_name, movie):
    """Score how well a release name matches any of the movie's titles.

    For each known title of *movie*, several name candidates are derived
    from *check_name* (simplified form, quoted part, longest bracketed
    part) and compared word-by-word against the title.

    :param check_name: release/file name to check
    :param movie: movie dict with 'info'->'titles'
    :return: number of title variants that matched (0 means no match)
    """
    MovieTitles = movie['info']['titles']
    result = 0
    for movietitle in MovieTitles:
        check_names = [simplifyString(check_name)]
        # Match names between quotes
        try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
        except: pass
        # Match longest name between []
        try: check_names.append(max(check_name.split('['), key = len))
        except: pass
        # BUGFIX: use a separate loop variable here. The original rebound
        # `check_name` itself, so every title after the first was compared
        # against a mangled candidate instead of the raw release name.
        for candidate in list(set(check_names)):
            check_movie = getReleaseNameYear(candidate)
            try:
                # A match requires every word of the guessed name to appear
                # in the title (empty word lists never match).
                check_words = filter(None, re.split('\W+', simplifyString(check_movie.get('name', ''))))
                movie_words = filter(None, re.split('\W+', simplifyString(movietitle)))
                if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
                    result += 1
            except:
                pass
    return result
def getReleaseNameYear(release_name, file_name = None):
    """Guess a movie name and year from a scene release name.

    Tries guessit first, then falls back to a simple heuristic (strip
    scene tags, split on the detected year) and returns whichever guess
    looks more complete.

    :param release_name: release name to analyse
    :param file_name: optional original file name, used only in logging
    :return: dict with 'name' and 'year' keys (possibly empty)
    """
    # BUGFIX: `traceback` is used in the except handler below but was never
    # imported by this module, turning any guessit failure into a NameError.
    import traceback

    # Use guessit first
    guess = {}
    if release_name:
        release_name = re.sub(clean, ' ', release_name.lower())
        try:
            guess = guess_movie_info(toUnicode(release_name))
            if guess.get('title') and guess.get('year'):
                guess = {
                    'name': guess.get('title'),
                    'year': guess.get('year'),
                }
            elif guess.get('title'):
                # Title without a year: keep 0 as the "unknown year" marker.
                guess = {
                    'name': guess.get('title'),
                    'year': 0,
                }
        except:
            log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))

    # Backup to simple: repeatedly scrub scene tags, then split on the year.
    cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
    for i in range(1, 4):
        cleaned = re.sub(clean, ' ', cleaned)
        cleaned = re.sub(clean, ' ', cleaned)
    year = findYear(cleaned)
    cp_guess = {}

    if year: # Split name on year
        try:
            movie_name = cleaned.split(year).pop(0).strip()
            cp_guess = {
                'name': movie_name,
                'year': int(year),
            }
        except:
            pass
    else: # Split name on multiple spaces
        # NOTE(review): the comment says "multiple spaces" but the split is
        # on a single space -- confirm against upstream before changing.
        try:
            movie_name = cleaned.split(' ').pop(0).strip()
            cp_guess = {
                'name': movie_name,
                'year': 0,
            }
        except:
            pass

    # When both guesses agree on the year, prefer the one with the shorter
    # name; fall back to cp_guess only when guessit produced nothing.
    if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')):
        return guess
    elif guess == {}:
        return cp_guess
    if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) < len(guess.get('name', '')):
        return cp_guess
    return guess
def findYear(text):
    """Return the first plausible year (19xx or 20xx) found in *text*.

    :param text: string to scan
    :return: the 4-digit year as a string, or '' when none is found
    """
    found = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
    return found.group('year') if found else ''
DefyVentures/edx-platform | common/djangoapps/third_party_auth/tests/test_pipeline.py | 78 | 1656 | """Unit tests for third_party_auth/pipeline.py."""
import random
from third_party_auth import pipeline, provider
from third_party_auth.tests import testutil
# Allow tests access to protected methods (or module-protected methods) under
# test. pylint: disable-msg=protected-access
class MakeRandomPasswordTest(testutil.TestCase):
    """Tests formation of random placeholder passwords."""

    def setUp(self):
        super(MakeRandomPasswordTest, self).setUp()
        # Fixed seed so the pseudo-random expectations are reproducible.
        self.seed = 1

    def test_default_args(self):
        # Without arguments the password has the module's default length.
        self.assertEqual(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH, len(pipeline.make_random_password()))

    def test_probably_only_uses_charset(self):
        # This is ultimately probablistic since we could randomly select a good character 100000 consecutive times.
        for char in pipeline.make_random_password(length=100000):
            self.assertIn(char, pipeline._PASSWORD_CHARSET)

    def test_pseudorandomly_picks_chars_from_charset(self):
        # Build the expected sequence from an identically-seeded RNG...
        random_instance = random.Random(self.seed)
        expected = ''.join(
            random_instance.choice(pipeline._PASSWORD_CHARSET)
            for _ in xrange(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH))
        # ...then reseed and check production code draws the same characters
        # when given the same choice function.
        random_instance.seed(self.seed)
        self.assertEqual(expected, pipeline.make_random_password(choice_fn=random_instance.choice))
class ProviderUserStateTestCase(testutil.TestCase):
    """Tests ProviderUserState behavior."""

    def test_get_unlink_form_name(self):
        # The unlink form is named after the provider: '<NAME>_unlink_form'.
        state = pipeline.ProviderUserState(provider.GoogleOauth2, object(), False)
        self.assertEqual(provider.GoogleOauth2.NAME + '_unlink_form', state.get_unlink_form_name())
| agpl-3.0 |
hoatle/odoo | addons/edi/models/res_currency.py | 437 | 2892 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from edi import EDIMixin
from openerp import SUPERUSER_ID
# Default field set exported in a currency EDI document; the ISO code is
# added separately under the custom 'code' key (see edi_export in this file).
RES_CURRENCY_EDI_STRUCT = {
    #custom: 'code'
    'symbol': True,
    'rate': True,
}
class res_currency(osv.osv, EDIMixin):
    """EDI import/export support for currencies."""
    _inherit = "res.currency"

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Export the given currency records as EDI documents.

        Uses RES_CURRENCY_EDI_STRUCT by default and adds the ISO code of
        each currency under the custom 'code' key.
        """
        edi_struct = dict(edi_struct or RES_CURRENCY_EDI_STRUCT)
        edi_doc_list = []
        for currency in records:
            # Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
            edi_doc = super(res_currency,self).edi_export(cr, uid, [currency], edi_struct, context)[0]
            edi_doc.update(code=currency.name)
            edi_doc_list.append(edi_doc)
        return edi_doc_list

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import one currency EDI document and return the currency id.

        Resolution order: external id, then unique ISO code, then create
        a new currency (plus its rate, when one is supplied).
        """
        self._edi_requires_attributes(('code','symbol'), edi_document)
        external_id = edi_document['__id']
        existing_currency = self._edi_get_object_by_external_id(cr, uid, external_id, 'res_currency', context=context)
        if existing_currency:
            return existing_currency.id
        # find with unique ISO code
        existing_ids = self.search(cr, uid, [('name','=',edi_document['code'])])
        if existing_ids:
            return existing_ids[0]
        # nothing found, create a new one
        currency_id = self.create(cr, SUPERUSER_ID, {'name': edi_document['code'],
                                                     'symbol': edi_document['symbol']}, context=context)
        # BUGFIX: only 'code' and 'symbol' are required attributes, so 'rate'
        # may be absent -- pop with a default instead of raising KeyError.
        rate = edi_document.pop('rate', None)
        if rate:
            self.pool.get('res.currency.rate').create(cr, SUPERUSER_ID, {'currency_id': currency_id,
                                                                         'rate': rate}, context=context)
        return currency_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Fusion-Rom/android_external_chromium_org | build/android/pylib/host_driven/tests_annotations.py | 118 | 2789 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Annotations for host-driven tests."""
# pylint: disable=W0212
import os
class AnnotatedFunctions(object):
    """A container for annotated methods."""
    # Maps annotation name -> list of 'module.function' qualified names.
    _ANNOTATED = {}

    @staticmethod
    def _AddFunction(annotation, function):
        """Register *function* under *annotation* and return it unchanged.

        Args:
          annotation: the annotation string.
          function: the function.

        Returns:
          The function passed in.
        """
        source_file = function.__globals__['__file__']
        module_name = os.path.splitext(os.path.basename(source_file))[0]
        qualified_function_name = '.'.join([module_name, function.func_name])
        AnnotatedFunctions._ANNOTATED.setdefault(annotation, []).append(
            qualified_function_name)
        return function

    @staticmethod
    def IsAnnotated(annotation, qualified_function_name):
        """True if the qualified function name carries the annotation.

        Args:
          annotation: the annotation string.
          qualified_function_name: the qualified function name.

        Returns:
          True if module.function was registered under the annotation.
        """
        registered = AnnotatedFunctions._ANNOTATED.get(annotation, [])
        return qualified_function_name in registered

    @staticmethod
    def GetTestAnnotations(qualified_function_name):
        """Returns every annotation registered for the given function.

        Args:
          qualified_function_name: the qualified function name.

        Returns:
          List of all annotations for this function.
        """
        matches = []
        for annotation, tests in AnnotatedFunctions._ANNOTATED.iteritems():
            if qualified_function_name in tests:
                matches.append(annotation)
        return matches
# The following functions are annotations used for the host-driven tests.
# Each is a decorator that registers the test function under the matching
# annotation name and returns the function unchanged.

def Smoke(function):
    return AnnotatedFunctions._AddFunction('Smoke', function)

def SmallTest(function):
    return AnnotatedFunctions._AddFunction('SmallTest', function)

def MediumTest(function):
    return AnnotatedFunctions._AddFunction('MediumTest', function)

def LargeTest(function):
    return AnnotatedFunctions._AddFunction('LargeTest', function)

def EnormousTest(function):
    return AnnotatedFunctions._AddFunction('EnormousTest', function)

def FlakyTest(function):
    return AnnotatedFunctions._AddFunction('FlakyTest', function)

def DisabledTest(function):
    return AnnotatedFunctions._AddFunction('DisabledTest', function)

def Feature(feature_list):
    # Parameterized annotation: tags the test with 'Feature:<name>' for
    # every feature in feature_list, plus the generic 'Feature' annotation.
    def _AddFeatures(function):
        for feature in feature_list:
            AnnotatedFunctions._AddFunction('Feature:%s' % feature, function)
        return AnnotatedFunctions._AddFunction('Feature', function)
    return _AddFeatures
| bsd-3-clause |
Changaco/oh-mainline | vendor/packages/oauthlib/oauthlib/oauth1/rfc5849/utils.py | 93 | 2780 | # -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth
spec.
"""
from __future__ import absolute_import, unicode_literals
try:
import urllib2
except ImportError:
import urllib.request as urllib2
from oauthlib.common import quote, unquote, bytes_type, unicode_type
# ASCII letters and digits only; nothing in this chunk uses it directly --
# presumably a charset for generating OAuth-safe random strings (verify at
# the call sites).
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
                               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                               '0123456789')
def filter_params(target):
    """Decorator which filters params to remove non-oauth_* parameters

    Assumes the decorated method takes a params dict or list of tuples as its
    first argument.
    """
    import functools

    # functools.wraps copies __doc__ (as before) plus __name__, __module__,
    # etc., so the wrapper is a faithful stand-in for the wrapped callable.
    @functools.wraps(target)
    def wrapper(params, *args, **kwargs):
        params = filter_oauth_params(params)
        return target(params, *args, **kwargs)

    return wrapper
def filter_oauth_params(params):
    """Removes all non oauth parameters from a dict or a list of params."""
    def is_oauth(pair):
        # Keep only pairs whose key carries the "oauth_" prefix.
        return pair[0].startswith("oauth_")

    if isinstance(params, dict):
        return [pair for pair in params.items() if is_oauth(pair)]
    return [pair for pair in params if is_oauth(pair)]
def escape(u):
    """Escape a unicode string in an OAuth-compatible fashion.

    Per `section 3.6`_ of the spec.

    .. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6

    :param u: unicode string to percent-encode
    :raises ValueError: if *u* is not a unicode object
    """
    if not isinstance(u, unicode_type):
        raise ValueError('Only unicode objects are escapable. ' +
                         'Got %s of type %s.' % (u, type(u)))
    # Letters, digits, and the characters '_.-' are already treated as safe
    # by urllib.quote(). We need to add '~' to fully support rfc5849.
    return quote(u, safe=b'~')
def unescape(u):
    """Percent-decode a unicode string (inverse of escape()).

    :raises ValueError: if *u* is not a unicode object
    """
    if not isinstance(u, unicode_type):
        raise ValueError('Only unicode objects are unescapable.')
    return unquote(u)
def parse_keqv_list(l):
    """A unicode-safe version of urllib2.parse_keqv_list

    :param l: list of 'key=value' strings, as produced by parse_http_list
    """
    # With Python 2.6, parse_http_list handles unicode fine
    return urllib2.parse_keqv_list(l)

def parse_http_list(u):
    """A unicode-safe version of urllib2.parse_http_list

    :param u: comma-separated header value to split
    """
    # With Python 2.6, parse_http_list handles unicode fine
    return urllib2.parse_http_list(u)
def parse_authorization_header(authorization_header):
    """Parse an OAuth authorization header into a list of 2-tuples"""
    scheme = 'oauth '
    # Case-insensitive scheme check on the leading "OAuth " prefix.
    prefix = authorization_header[:len(scheme)]
    if prefix.lower().startswith(scheme):
        remainder = authorization_header[len(scheme):]
        items = parse_http_list(remainder)
        try:
            return list(parse_keqv_list(items).items())
        except (IndexError, ValueError):
            # Fall through to the generic malformed-header error below.
            pass
    raise ValueError('Malformed authorization header')
| agpl-3.0 |
codoo/vertical-community | __TODO__/project_crowdfunding/project_crowdfunding.py | 5 | 1089 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2013 Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class ProjectProject(orm.Model):
    # Extend project.project with the crowdfunding.campaign mixin, so a
    # project also carries the campaign model's fields/behaviour.
    _name = 'project.project'
    _inherit = ['project.project', 'crowdfunding.campaign']
| agpl-3.0 |
vitmod/enigma2-test | lib/python/Screens/EpgSelection.py | 6 | 59871 | from time import localtime, time, strftime, mktime
from enigma import eServiceReference, eTimer, eServiceCenter, ePoint
from Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.About import about
from Components.ActionMap import HelpableActionMap, HelpableNumberActionMap
from Components.Button import Button
from Components.config import config, configfile, ConfigClock
from Components.EpgList import EPGList, EPGBouquetList, TimelineText, EPG_TYPE_SINGLE, EPG_TYPE_SIMILAR, EPG_TYPE_MULTI, EPG_TYPE_ENHANCED, EPG_TYPE_INFOBAR, EPG_TYPE_INFOBARGRAPH, EPG_TYPE_GRAPH, MAX_TIMELINES
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Event import Event
from Components.UsageConfig import preferredTimerPath
from Screens.TimerEdit import TimerSanityConflict
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.PictureInPicture import PictureInPicture
from Screens.Setup import Setup
from TimeDateInput import TimeDateInput
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from TimerEntry import TimerEntry, InstantRecordTimerEntry
from ServiceReference import ServiceReference
from Tools.HardwareInfo import HardwareInfo
# Guards one-time setup of the multi-EPG configuration (set elsewhere).
mepg_config_initialized = False
# PiPServiceRelation installed?  Probe by importing it; the flag lets the
# rest of the module fall back gracefully when the plugin is absent.
try:
    from Plugins.SystemPlugins.PiPServiceRelation.plugin import getRelationDict
    plugin_PiPServiceRelation_installed = True
except:
    plugin_PiPServiceRelation_installed = False
class EPGSelection(Screen, HelpableScreen):
EMPTY = 0
ADD_TIMER = 1
REMOVE_TIMER = 2
ZAP = 1
def __init__(self, session, service = None, zapFunc = None, eventid = None, bouquetChangeCB=None, serviceChangeCB = None, EPGtype = None, StartBouquet = None, StartRef = None, bouquets = None):
Screen.__init__(self, session)
self.setTitle(_('EPG Selection'))
HelpableScreen.__init__(self)
self.zapFunc = zapFunc
self.serviceChangeCB = serviceChangeCB
self.bouquets = bouquets
graphic = False
if EPGtype == 'single':
self.type = EPG_TYPE_SINGLE
elif EPGtype == 'infobar':
self.type = EPG_TYPE_INFOBAR
elif EPGtype == 'enhanced':
self.type = EPG_TYPE_ENHANCED
elif EPGtype == 'graph':
self.type = EPG_TYPE_GRAPH
if config.epgselection.graph_type_mode.value == "graphics":
graphic = True
elif EPGtype == 'infobargraph':
self.type = EPG_TYPE_INFOBARGRAPH
if config.epgselection.infobar_type_mode.value == "graphics":
graphic = True
elif EPGtype == 'multi':
self.type = EPG_TYPE_MULTI
else:
self.type = EPG_TYPE_SIMILAR
if not self.type == EPG_TYPE_SINGLE:
self.StartBouquet = StartBouquet
self.StartRef = StartRef
self.servicelist = None
self.ChoiceBoxDialog = None
self.ask_time = -1
self.closeRecursive = False
self.eventviewDialog = None
self.eventviewWasShown = False
self.currch = None
self.session.pipshown = False
self.cureventindex = None
if plugin_PiPServiceRelation_installed:
self.pipServiceRelation = getRelationDict()
else:
self.pipServiceRelation = {}
self.zapnumberstarted = False
self.NumberZapTimer = eTimer()
self.NumberZapTimer.callback.append(self.dozumberzap)
self.NumberZapField = None
self.CurrBouquet = None
self.CurrService = None
self["number"] = Label()
self["number"].hide()
self['Service'] = ServiceEvent()
self['Event'] = Event()
self['lab1'] = Label(_('Please wait while gathering data...'))
self.key_green_choice = self.EMPTY
self['key_red'] = Button(_('IMDb Search'))
self['key_green'] = Button(_('Add Timer'))
self['key_yellow'] = Button(_('EPG Search'))
self['key_blue'] = Button(_('Add AutoTimer'))
self['dialogactions'] = HelpableActionMap(self, 'WizardActions',
{
'back': (self.closeChoiceBoxDialog, _('Close dialog')),
}, -1)
self['dialogactions'].csel = self
self["dialogactions"].setEnabled(False)
self['okactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'cancel': (self.closeScreen, _('Exit EPG')),
'OK': (self.OK, _('Zap to channel (setup in menu)')),
'OKLong': (self.OKLong, _('Zap to channel and close (setup in menu)'))
}, -1)
self['okactions'].csel = self
self['colouractions'] = HelpableActionMap(self, 'ColorActions',
{
'red': (self.redButtonPressed, _('IMDB search for current event')),
'redlong': (self.redButtonPressedLong, _('Sort EPG List')),
'green': (self.greenButtonPressed, _('Add/Remove timer for current event')),
'greenlong': (self.greenButtonPressedLong, _('Show Timer List')),
'yellow': (self.yellowButtonPressed, _('Search for similar events')),
'blue': (self.blueButtonPressed, _('Add a auto timer for current event')),
'bluelong': (self.blueButtonPressedLong, _('Show AutoTimer List'))
}, -1)
self['colouractions'].csel = self
self['recordingactions'] = HelpableActionMap(self, 'InfobarInstantRecord',
{
'ShortRecord': (self.recButtonPressed, _('Add a record timer for current event')),
'LongRecord': (self.recButtonPressedLong, _('Add a zap timer for current event'))
}, -1)
self['recordingactions'].csel = self
if self.type == EPG_TYPE_SIMILAR:
self.currentService = service
self.eventid = eventid
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
elif self.type == EPG_TYPE_SINGLE:
self.currentService = ServiceReference(service)
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextService': (self.nextService, _('Goto next channel')),
'prevService': (self.prevService, _('Goto previous channel')),
'info': (self.Info, _('Show detailed event info')),
'epg': (self.Info, _('Show detailed event info')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevPage, _('Move up a page')),
'right': (self.nextPage, _('Move down a page')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
elif self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_ENHANCED:
if self.type == EPG_TYPE_INFOBAR:
self.skinName = 'QuickEPG'
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
'nextService': (self.nextPage, _('Move down a page')),
'prevService': (self.prevPage, _('Move up a page')),
'input_date_time': (self.enterDateTime, _('Goto specific data/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevService, _('Goto previous channel')),
'right': (self.nextService, _('Goto next channel')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
elif self.type == EPG_TYPE_ENHANCED:
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
'nextService': (self.nextService, _('Goto next channel')),
'prevService': (self.prevService, _('Goto previous channel')),
'input_date_time': (self.enterDateTime, _('Goto specific data/time')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'epg': (self.Info, _('Show detailed event info')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.prevPage, _('Move up a page')),
'right': (self.nextPage, _('Move down a page')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions',
{
'0': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'1': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'2': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'3': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'4': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'5': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'6': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'7': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'8': (self.keyNumberGlobal, _('enter number to jump to channel.')),
'9': (self.keyNumberGlobal, _('enter number to jump to channel.'))
}, -1)
self['input_actions'].csel = self
self.list = []
self.servicelist = service
self.currentService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.type == EPG_TYPE_GRAPH:
if not config.epgselection.graph_pig.value:
self.skinName = 'GraphicalEPG'
else:
self.skinName = 'GraphicalEPGPIG'
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.skinName = 'GraphicalInfoBarEPG'
now = time() - int(config.epg.histminutes.value) * 60
if self.type == EPG_TYPE_GRAPH:
self.ask_time = self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.ask_time = self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self.closeRecursive = False
self.bouquetlist_active = False
self['bouquetlist'] = EPGBouquetList(graphic=graphic)
self['bouquetlist'].hide()
self['timeline_text'] = TimelineText(type=self.type,graphic=graphic)
self['Event'] = Event()
self['primetime'] = Label(_('PRIMETIME'))
self['change_bouquet'] = Label(_('CHANGE BOUQUET'))
self['jump'] = Label(_('JUMP 24 HOURS'))
self['page'] = Label(_('PAGE UP/DOWN'))
self.time_lines = []
for x in range(0, MAX_TIMELINES):
pm = Pixmap()
self.time_lines.append(pm)
self['timeline%d' % x] = pm
self['timeline_now'] = Pixmap()
self.updateTimelineTimer = eTimer()
self.updateTimelineTimer.callback.append(self.moveTimeLines)
self.updateTimelineTimer.start(60000)
self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'cancel': (self.BouquetlistHide, _('Close bouquet list.')),
'OK': (self.BouquetOK, _('Change to bouquet')),
}, -1)
self['bouquetokactions'].csel = self
self["bouquetokactions"].setEnabled(False)
self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.moveBouquetPageUp, _('Goto previous event')),
'right': (self.moveBouquetPageDown, _('Goto next event')),
'up': (self.moveBouquetUp, _('Goto previous channel')),
'down': (self.moveBouquetDown, _('Goto next channel'))
}, -1)
self['bouquetcursoractions'].csel = self
self["bouquetcursoractions"].setEnabled(False)
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.leftPressed, _('Goto previous event')),
'right': (self.rightPressed, _('Goto next event')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextService': (self.nextService, _('Jump forward 24 hours')),
'prevService': (self.prevService, _('Jump back 24 hours')),
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
'input_date_time': (self.enterDateTime, _('Goto specific data/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')),
'tvlong': (self.togglePIG, _('Toggle Picture In Graphics')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions',
{
'1': (self.keyNumberGlobal, _('Reduce time scale')),
'2': (self.keyNumberGlobal, _('Page up')),
'3': (self.keyNumberGlobal, _('Increase time scale')),
'4': (self.keyNumberGlobal, _('page left')),
'5': (self.keyNumberGlobal, _('Jump to current time')),
'6': (self.keyNumberGlobal, _('Page right')),
'7': (self.keyNumberGlobal, _('No of items switch (increase or reduced)')),
'8': (self.keyNumberGlobal, _('Page down')),
'9': (self.keyNumberGlobal, _('Jump to prime time')),
'0': (self.keyNumberGlobal, _('Move to home of list'))
}, -1)
self['input_actions'].csel = self
elif self.type == EPG_TYPE_MULTI:
self.skinName = 'EPGSelectionMulti'
self['bouquetlist'] = EPGBouquetList(graphic=graphic)
self['bouquetlist'].hide()
self['now_button'] = Pixmap()
self['next_button'] = Pixmap()
self['more_button'] = Pixmap()
self['now_button_sel'] = Pixmap()
self['next_button_sel'] = Pixmap()
self['more_button_sel'] = Pixmap()
self['now_text'] = Label()
self['next_text'] = Label()
self['more_text'] = Label()
self['date'] = Label()
self.bouquetlist_active = False
self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions',
{
'OK': (self.BouquetOK, _('Change to bouquet')),
}, -1)
self['bouquetokactions'].csel = self
self["bouquetokactions"].setEnabled(False)
self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.moveBouquetPageUp, _('Goto previous event')),
'right': (self.moveBouquetPageDown, _('Goto next event')),
'up': (self.moveBouquetUp, _('Goto previous channel')),
'down': (self.moveBouquetDown, _('Goto next channel'))
}, -1)
self['bouquetcursoractions'].csel = self
self['bouquetcursoractions'].setEnabled(False)
self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions',
{
'left': (self.leftPressed, _('Goto previous event')),
'right': (self.rightPressed, _('Goto next event')),
'up': (self.moveUp, _('Goto previous channel')),
'down': (self.moveDown, _('Goto next channel'))
}, -1)
self['epgcursoractions'].csel = self
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions',
{
'nextService': (self.nextPage, _('Move down a page')),
'prevService': (self.prevPage, _('Move up a page')),
'nextBouquet': (self.nextBouquet, _('Goto next bouquet')),
'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')),
'input_date_time': (self.enterDateTime, _('Goto specific data/time')),
'epg': (self.epgButtonPressed, _('Show single epg for current channel')),
'info': (self.Info, _('Show detailed event info')),
'infolong': (self.InfoLong, _('Show single epg for current channel')),
'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')),
'menu': (self.createSetup, _('Setup menu'))
}, -1)
self['epgactions'].csel = self
if self.type == EPG_TYPE_GRAPH:
time_epoch=int(config.epgselection.graph_prevtimeperiod.value)
elif self.type == EPG_TYPE_INFOBARGRAPH:
time_epoch=int(config.epgselection.infobar_prevtimeperiod.value)
else:
time_epoch=None
self['list'] = EPGList(type=self.type, selChangedCB=self.onSelectionChanged, timer=session.nav.RecordTimer, time_epoch=time_epoch, overjump_empty=config.epgselection.overjump.value, graphic=graphic)
self.refreshTimer = eTimer()
self.refreshTimer.timeout.get().append(self.refreshlist)
self.listTimer = eTimer()
self.listTimer.callback.append(self.hidewaitingtext)
if not HardwareInfo().is_nextgen():
self.createTimer = eTimer()
self.createTimer.callback.append(self.onCreate)
self.onLayoutFinish.append(self.LayoutFinish)
else:
self.onLayoutFinish.append(self.onCreate)
def createSetup(self):
    """Open the Setup screen matching the current EPG mode (if any)."""
    self.closeEventViewDialog()
    # Map each EPG mode to its setup key; modes without an entry
    # (e.g. the 'similar' EPG) have no setup screen.
    setup_keys = {
        EPG_TYPE_SINGLE: 'epgsingle',
        EPG_TYPE_MULTI: 'epgmulti',
        EPG_TYPE_ENHANCED: 'epgenhanced',
        EPG_TYPE_INFOBAR: 'epginfobar',
        EPG_TYPE_GRAPH: 'epggraphical',
        EPG_TYPE_INFOBARGRAPH: 'epginfobargraphical',
    }
    key = setup_keys.get(self.type)
    if key:
        self.session.openWithCallback(self.onSetupClose, Setup, key)
def onSetupClose(self, test = None):
    """Called when the setup screen closes: close this EPG with a
    'reopen' hint for the graph/infobar variants; other modes stay open."""
    if self.type == EPG_TYPE_GRAPH:
        self.close('reopengraph')
    elif self.type == EPG_TYPE_INFOBARGRAPH:
        self.close('reopeninfobargraph')
    elif self.type == EPG_TYPE_INFOBAR:
        self.close('reopeninfobar')
def togglePIG(self):
    """Toggle the graph EPG's picture-in-graphics setting, persist it
    and reopen the screen so the new layout is applied."""
    pig_setting = config.epgselection.graph_pig
    pig_setting.setValue(not pig_setting.value)
    pig_setting.save()
    configfile.save()
    self.close('reopengraph')
	def hidewaitingtext(self):
		# One-shot: stop the list timer and hide the "waiting" label once the list is filled.
		self.listTimer.stop()
		if self.type == EPG_TYPE_MULTI:
			# multi-EPG: focus the list on the currently playing service first
			self['list'].moveToService(self.session.nav.getCurrentlyPlayingServiceOrGroup())
		self['lab1'].hide()
def getBouquetServices(self, bouquet):
services = []
servicelist = eServiceCenter.getInstance().list(bouquet)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
continue
services.append(ServiceReference(service))
return services
	def LayoutFinish(self):
		# Show the waiting label, then defer the expensive list fill via createTimer
		# (this path is only wired up for non-nextgen hardware, see the constructor).
		self['lab1'].show()
		self.createTimer.start(800)
	def onCreate(self):
		"""Initial fill of the EPG list for the active mode; called after layout."""
		if not HardwareInfo().is_nextgen():
			self.createTimer.stop()
		serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		title = None
		self['list'].recalcEntrySize()
		self.BouquetRoot = False
		if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
			# a reference starting with '1:7:0' is the bouquet root ("all bouquets")
			if self.StartBouquet.toString().startswith('1:7:0'):
				self.BouquetRoot = True
			self.services = self.getBouquetServices(self.StartBouquet)
			self['list'].fillGraphEPG(self.services, self.ask_time)
			self['list'].moveToService(serviceref)
			self['list'].setCurrentlyPlaying(serviceref)
			self['bouquetlist'].recalcEntrySize()
			self['bouquetlist'].fillBouquetList(self.bouquets)
			self['bouquetlist'].moveToService(self.StartBouquet)
			self['bouquetlist'].setCurrentBouquet(self.StartBouquet )
			self.setTitle(self['bouquetlist'].getCurrentBouquet())
			if self.type == EPG_TYPE_GRAPH:
				self['list'].setShowServiceMode(config.epgselection.graph_servicetitle_mode.value)
				self.moveTimeLines()
				if config.epgselection.graph_channel1.value:
					# optionally always start on the first channel of the bouquet
					self['list'].instance.moveSelectionTo(0)
			elif self.type == EPG_TYPE_INFOBARGRAPH:
				self['list'].setShowServiceMode(config.epgselection.infobar_servicetitle_mode.value)
				self.moveTimeLines()
		elif self.type == EPG_TYPE_MULTI:
			self['bouquetlist'].recalcEntrySize()
			self['bouquetlist'].fillBouquetList(self.bouquets)
			self['bouquetlist'].moveToService(self.StartBouquet)
			self['bouquetlist'].fillBouquetList(self.bouquets)
			self.services = self.getBouquetServices(self.StartBouquet)
			self['list'].fillMultiEPG(self.services, self.ask_time)
			self['list'].setCurrentlyPlaying(serviceref)
			self.setTitle(self['bouquetlist'].getCurrentBouquet())
		elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
			if self.type == EPG_TYPE_SINGLE:
				service = self.currentService
			elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
				service = ServiceReference(self.servicelist.getCurrentSelection())
				# enhanced/infobar modes prefix the title with the bouquet name
				title = ServiceReference(self.servicelist.getRoot()).getServiceName()
			self['Service'].newService(service.ref)
			if title:
				title = title + ' - ' + service.getServiceName()
			else:
				title = service.getServiceName()
			self.setTitle(title)
			self['list'].fillSingleEPG(service)
			self['list'].sortSingleEPG(int(config.epgselection.sort.value))
		else:
			# remaining mode: similar-events list
			self['list'].fillSimilarList(self.currentService, self.eventid)
		self.listTimer.start(10)
	def refreshlist(self):
		"""Refresh the currently shown EPG data in place, keeping the selection."""
		self.refreshTimer.stop()
		if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
			self['list'].fillGraphEPG(None, self.ask_time)
			self.moveTimeLines()
		elif self.type == EPG_TYPE_MULTI:
			self['list'].fillMultiEPG(self.services, self.ask_time)
		elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
			try:
				if self.type == EPG_TYPE_SINGLE:
					service = self.currentService
				elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
					service = ServiceReference(self.servicelist.getCurrentSelection())
				# remember/restore the selection index across the re-fill
				if not self.cureventindex:
					index = self['list'].getCurrentIndex()
				else:
					index = self.cureventindex
					self.cureventindex = None
				self['list'].fillSingleEPG(service)
				self['list'].sortSingleEPG(int(config.epgselection.sort.value))
				self['list'].setCurrentIndex(index)
			except:
				# best-effort refresh: swallow errors (e.g. selection/service gone)
				pass
def moveUp(self):
self['list'].moveTo(self['list'].instance.moveUp)
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveTimeLines(True)
def moveDown(self):
self['list'].moveTo(self['list'].instance.moveDown)
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveTimeLines(True)
def updEvent(self, dir, visible = True):
ret = self['list'].selEntry(dir, visible)
if ret:
self.moveTimeLines(True)
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveTimeLines(True)
def nextPage(self):
self['list'].moveTo(self['list'].instance.pageDown)
def prevPage(self):
self['list'].moveTo(self['list'].instance.pageUp)
def toTop(self):
self['list'].moveTo(self['list'].instance.moveTop)
def toEnd(self):
self['list'].moveTo(self['list'].instance.moveEnd)
def leftPressed(self):
if self.type == EPG_TYPE_MULTI:
self['list'].updateMultiEPG(-1)
else:
self.updEvent(-1)
def rightPressed(self):
if self.type == EPG_TYPE_MULTI:
self['list'].updateMultiEPG(1)
else:
self.updEvent(+1)
def Bouquetlist(self):
if not self.bouquetlist_active:
self.BouquetlistShow()
else:
self.BouquetlistHide()
	def BouquetlistShow(self):
		# Show the bouquet-list overlay and reroute OK/cursor keys to it.
		# The current index is remembered so a cancel can restore it.
		self.curindex = self['bouquetlist'].l.getCurrentSelectionIndex()
		self["epgcursoractions"].setEnabled(False)
		self["okactions"].setEnabled(False)
		self['bouquetlist'].show()
		self["bouquetokactions"].setEnabled(True)
		self["bouquetcursoractions"].setEnabled(True)
		self.bouquetlist_active = True
	def BouquetlistHide(self, cancel=True):
		# Hide the bouquet-list overlay and give the keys back to the EPG.
		# cancel=True restores the selection remembered in BouquetlistShow.
		self["bouquetokactions"].setEnabled(False)
		self["bouquetcursoractions"].setEnabled(False)
		self['bouquetlist'].hide()
		if cancel:
			self['bouquetlist'].setCurrentIndex(self.curindex)
		self["okactions"].setEnabled(True)
		self["epgcursoractions"].setEnabled(True)
		self.bouquetlist_active = False
def getCurrentBouquet(self):
if self.BouquetRoot:
return self.StartBouquet
elif self.has_key('bouquetlist'):
cur = self["bouquetlist"].l.getCurrentSelection()
return cur and cur[1]
else:
return self.servicelist.getRoot()
def BouquetOK(self):
self.BouquetRoot = False
now = time() - int(config.epg.histminutes.value) * 60
self.services = self.getBouquetServices(self.getCurrentBouquet())
if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
if self.type == EPG_TYPE_GRAPH:
self.ask_time = self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
elif self.type == EPG_TYPE_INFOBARGRAPH:
self.ask_time = self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
self['list'].resetOffset()
self['list'].fillGraphEPG(self.services, self.ask_time)
self.moveTimeLines(True)
elif self.type == EPG_TYPE_MULTI:
self['list'].fillMultiEPG(self.services, self.ask_time)
self['list'].instance.moveSelectionTo(0)
self.setTitle(self['bouquetlist'].getCurrentBouquet())
self.BouquetlistHide(False)
	def moveBouquetUp(self):
		# Move the bouquet-list cursor up and redraw its entries.
		self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveUp)
		self['bouquetlist'].fillBouquetList(self.bouquets)
	def moveBouquetDown(self):
		# Move the bouquet-list cursor down and redraw its entries.
		self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveDown)
		self['bouquetlist'].fillBouquetList(self.bouquets)
	def moveBouquetPageUp(self):
		# Page the bouquet list up and redraw its entries.
		self['bouquetlist'].moveTo(self['bouquetlist'].instance.pageUp)
		self['bouquetlist'].fillBouquetList(self.bouquets)
	def moveBouquetPageDown(self):
		# Page the bouquet list down and redraw its entries.
		self['bouquetlist'].moveTo(self['bouquetlist'].instance.pageDown)
		self['bouquetlist'].fillBouquetList(self.bouquets)
def nextBouquet(self):
if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveBouquetDown()
self.BouquetOK()
elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.value:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self.servicelist.nextBouquet()
self.onCreate()
def prevBouquet(self):
if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
self.moveBouquetUp()
self.BouquetOK()
elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.value:
self.CurrBouquet = self.servicelist.getCurrentSelection()
self.CurrService = self.servicelist.getRoot()
self.servicelist.prevBouquet()
self.onCreate()
	def nextService(self):
		"""Jump the single-service EPG to the next service; graph modes page forward."""
		if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
			self.CurrBouquet = self.servicelist.getCurrentSelection()
			self.CurrService = self.servicelist.getRoot()
			self['list'].instance.moveSelectionTo(0)
			if self.servicelist.inBouquet():
				prev = self.servicelist.getCurrentSelection()
				if prev:
					prev = prev.toString()
					while True:
						if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
							self.servicelist.nextBouquet()
						else:
							self.servicelist.moveDown()
						cur = self.servicelist.getCurrentSelection()
						# flag 64 marks a non-selectable entry; stop once a selectable
						# service is reached or we wrapped back to the start
						if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
							break
			else:
				self.servicelist.moveDown()
			if self.isPlayable():
				self.onCreate()
				# overjump: skip services without EPG data
				if not self['list'].getCurrent()[1] and config.epgselection.overjump.value:
					self.nextService()
			else:
				self.nextService()
		elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
			# large step forward in the graph view (handled by updEvent)
			self.updEvent(+24)
		elif self.serviceChangeCB:
			self.serviceChangeCB(1, self)
	def prevService(self):
		"""Jump the single-service EPG to the previous service; graph modes page back."""
		if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
			self.CurrBouquet = self.servicelist.getCurrentSelection()
			self.CurrService = self.servicelist.getRoot()
			self['list'].instance.moveSelectionTo(0)
			if self.servicelist.inBouquet():
				prev = self.servicelist.getCurrentSelection()
				if prev:
					prev = prev.toString()
					while True:
						if config.usage.quickzap_bouquet_change.value:
							if self.servicelist.atBegin():
								self.servicelist.prevBouquet()
						self.servicelist.moveUp()
						cur = self.servicelist.getCurrentSelection()
						# flag 64 marks a non-selectable entry; stop once a selectable
						# service is reached or we wrapped back to the start
						if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
							break
			else:
				self.servicelist.moveUp()
			if self.isPlayable():
				self.onCreate()
				# overjump: skip services without EPG data
				if not self['list'].getCurrent()[1] and config.epgselection.overjump.value:
					self.prevService()
			else:
				self.prevService()
		elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
			# large step backward in the graph view (handled by updEvent)
			self.updEvent(-24)
		elif self.serviceChangeCB:
			self.serviceChangeCB(-1, self)
	def enterDateTime(self):
		"""Ask the user for a date/time to jump to (multi and graph modes only)."""
		global mepg_config_initialized
		if self.type == EPG_TYPE_MULTI:
			if not mepg_config_initialized:
				# lazily create the shared ConfigClock used by the multi-EPG date dialog
				config.misc.prev_mepg_time = ConfigClock(default=time())
				mepg_config_initialized = True
			self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.prev_mepg_time)
		elif self.type == EPG_TYPE_GRAPH:
			self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.graph_prevtime)
		elif self.type == EPG_TYPE_INFOBARGRAPH:
			self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.infobar_prevtime)
	def onDateTimeInputClosed(self, ret):
		"""Callback from TimeDateInput; ret is (confirmed, timestamp)."""
		if len(ret) > 1:
			if ret[0]:
				if self.type == EPG_TYPE_MULTI:
					self.ask_time = ret[1]
					self['list'].fillMultiEPG(self.services, ret[1])
				elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
					# NOTE(review): 'now' is computed but never used, and ask_time is
					# only rounded here, never assigned from ret[1] — looks suspicious;
					# confirm against upstream before changing.
					now = time() - int(config.epg.histminutes.value) * 60
					if self.type == EPG_TYPE_GRAPH:
						self.ask_time -= self.ask_time % (int(config.epgselection.graph_roundto.value) * 60)
					elif self.type == EPG_TYPE_INFOBARGRAPH:
						self.ask_time -= self.ask_time % (int(config.epgselection.infobar_roundto.value) * 60)
					l = self['list']
					l.resetOffset()
					l.fillGraphEPG(None, self.ask_time)
					self.moveTimeLines(True)
		if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH):
			# keep the inline event view in sync with the new position
			self.infoKeyPressed(True)
	def infoKeyPressed(self, eventviewopen=False):
		"""Open, close or refresh the event view for the current selection.

		eventviewopen=True means the inline dialog is already open and should
		just be refreshed with the newly selected event.
		"""
		cur = self['list'].getCurrent()
		event = cur[0]
		service = cur[1]
		if event is not None and not self.eventviewDialog and not eventviewopen:
			if self.type != EPG_TYPE_SIMILAR:
				if self.type == EPG_TYPE_INFOBARGRAPH:
					# infobar-graph uses a lightweight inline dialog instead of a full screen
					self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView')
					self.eventviewDialog.show()
				else:
					self.session.open(EventViewEPGSelect, event, service, callback=self.eventViewCallback, similarEPGCB=self.openSimilarList)
		elif self.eventviewDialog and not eventviewopen:
			# second press: close the inline dialog
			self.eventviewDialog.hide()
			del self.eventviewDialog
			self.eventviewDialog = None
		elif event is not None and self.eventviewDialog and eventviewopen:
			if self.type != EPG_TYPE_SIMILAR:
				if self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH:
					# refresh: recreate the inline dialog for the new event
					self.eventviewDialog.hide()
					self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView')
					self.eventviewDialog.show()
def redButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.openIMDb()
def redButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.sortEpg()
def greenButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.RecordTimerQuestion(True)
def greenButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.showTimerList()
def yellowButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.openEPGSearch()
def blueButtonPressed(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.addAutoTimer()
def blueButtonPressedLong(self):
self.closeEventViewDialog()
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.showAutoTimerList()
	def openSimilarList(self, eventid, refstr):
		# Open a new EPG screen listing events similar to the given one.
		self.session.open(EPGSelection, refstr, None, eventid)
	def setServices(self, services):
		# External setter: replace the service list and rebuild the whole view.
		self.services = services
		self.onCreate()
	def setService(self, service):
		# External setter: switch the single-service EPG to another service.
		self.currentService = service
		self.onCreate()
	def eventViewCallback(self, setEvent, setService, val):
		"""Prev/next navigation callback from the event view (val: -1 up, +1 down)."""
		l = self['list']
		old = l.getCurrent()
		if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH:
			self.updEvent(val, False)
		elif val == -1:
			self.moveUp()
		elif val == +1:
			self.moveDown()
		cur = l.getCurrent()
		# skip service rows without an event by recursing until one is found
		# (only in modes where a row may carry no event)
		if (self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH) and cur[0] is None and cur[1].ref != old[1].ref:
			self.eventViewCallback(setEvent, setService, val)
		else:
			setService(cur[1])
			setEvent(cur[0])
	def eventSelected(self):
		# OK on an event: show its details.
		self.infoKeyPressed()
def sortEpg(self):
if self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR:
if config.epgselection.sort.value == '0':
config.epgselection.sort.setValue('1')
else:
config.epgselection.sort.setValue('0')
config.epgselection.sort.save()
configfile.save()
self['list'].sortSingleEPG(int(config.epgselection.sort.value))
def OpenSingleEPG(self):
cur = self['list'].getCurrent()
if cur[0] is not None:
event = cur[0]
serviceref = cur[1].ref
if serviceref is not None:
self.session.open(SingleEPG, serviceref)
	def openIMDb(self):
		"""Open the IMDb plugin for the selected event name, if installed."""
		try:
			from Plugins.Extensions.IMDb.plugin import IMDB, IMDBEPGSelection
			try:
				cur = self['list'].getCurrent()
				event = cur[0]
				name = event.getEventName()
			except:
				# no valid selection: fall back to an empty search term
				name = ''
			self.session.open(IMDB, name, False)
		except ImportError:
			self.session.open(MessageBox, _('The IMDb plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
	def openEPGSearch(self):
		"""Open the EPGSearch plugin pre-filled with the selected event name, if installed."""
		try:
			from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
			try:
				cur = self['list'].getCurrent()
				event = cur[0]
				name = event.getEventName()
			except:
				# no valid selection: fall back to an empty search term
				name = ''
			self.session.open(EPGSearch, name, False)
		except ImportError:
			self.session.open(MessageBox, _('The EPGSearch plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
	def addAutoTimer(self):
		"""Create an AutoTimer from the selected event via the AutoTimer editor."""
		try:
			from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEvent
			cur = self['list'].getCurrent()
			event = cur[0]
			if not event:
				return
			serviceref = cur[1]
			addAutotimerFromEvent(self.session, evt=event, service=serviceref)
			# refresh shortly afterwards so newly created timers show up
			self.refreshTimer.start(3000)
		except ImportError:
			self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
	def addAutoTimerSilent(self):
		"""Create an AutoTimer from the selected event without opening the editor."""
		try:
			from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEventSilent
			cur = self['list'].getCurrent()
			event = cur[0]
			if not event:
				return
			serviceref = cur[1]
			addAutotimerFromEventSilent(self.session, evt=event, service=serviceref)
			# refresh shortly afterwards so newly created timers show up
			self.refreshTimer.start(3000)
		except ImportError:
			self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
	def showTimerList(self):
		# Open the standard timer overview screen.
		from Screens.TimerEdit import TimerEditList
		self.session.open(TimerEditList)
	def showAutoTimerList(self):
		"""Open the AutoTimer overview (if installed), pausing its poller meanwhile."""
		global autopoller
		global autotimer
		try:
			from Plugins.Extensions.AutoTimer.plugin import main, autostart
			from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
			from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
			autopoller = AutoPoller()
			autotimer = AutoTimer()
			try:
				autotimer.readXml()
			except SyntaxError as se:
				self.session.open(MessageBox, _('Your config file is not well-formed:\n%s') % str(se), type=MessageBox.TYPE_ERROR, timeout=10)
				return
			# pause polling while the overview is open (restarted in editCallback)
			if autopoller is not None:
				autopoller.stop()
			from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
			self.session.openWithCallback(self.editCallback, AutoTimerOverview, autotimer)
		except ImportError:
			self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10)
	def editCallback(self, session):
		"""AutoTimer overview closed: persist changes and restart the poller if enabled."""
		global autopoller
		global autotimer
		if session is not None:
			# the overview was confirmed: write config and rescan the EPG
			autotimer.writeXml()
			autotimer.parseEPG()
		if config.plugins.autotimer.autopoll.value:
			if autopoller is None:
				from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
				autopoller = AutoPoller()
			autopoller.start()
		else:
			# polling disabled: drop the module-level instances
			autopoller = None
			autotimer = None
def timerAdd(self):
self.RecordTimerQuestion(True)
def timerAdd(self):
self.RecordTimerQuestion(True)
	def editTimer(self, timer):
		# Open the timer editor for an existing timer entry.
		self.session.open(TimerEntry, timer)
	def removeTimer(self, timer):
		"""Delete the given record timer and reset the green key to 'Add Timer'."""
		self.closeChoiceBoxDialog()
		# suppress any after-event action (shutdown/standby) of the removed timer
		timer.afterEvent = AFTEREVENT.NONE
		self.session.nav.RecordTimer.removeEntry(timer)
		self['key_green'].setText(_('Add Timer'))
		self.key_green_choice = self.ADD_TIMER
		self.refreshlist()
	def disableTimer(self, timer):
		"""Disable (but keep) the given record timer and reset the green key."""
		self.closeChoiceBoxDialog()
		timer.disable()
		# notify the record timer so the change takes effect immediately
		self.session.nav.RecordTimer.timeChanged(timer)
		self['key_green'].setText(_('Add Timer'))
		self.key_green_choice = self.ADD_TIMER
		self.refreshlist()
	def RecordTimerQuestion(self, manual=False):
		"""Offer timer actions for the selected event.

		If a timer already exists for the event, offer delete/edit/disable;
		otherwise offer add-timer/add-autotimer, or open the timer editor
		directly when manual=True.
		"""
		cur = self['list'].getCurrent()
		event = cur[0]
		serviceref = cur[1]
		if event is None:
			return
		eventid = event.getEventId()
		# compare only the first 11 reference fields (drops the trailing name part)
		refstr = ':'.join(serviceref.ref.toString().split(':')[:11])
		title = None
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
				cb_func1 = lambda ret: self.removeTimer(timer)
				cb_func2 = lambda ret: self.editTimer(timer)
				cb_func3 = lambda ret: self.disableTimer(timer)
				menu = [(_("Delete timer"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func1), (_("Edit timer"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func2), (_("Disable timer"), 'CALLFUNC', self.RemoveChoiceBoxCB, cb_func3)]
				title = _("Select action for timer %s:") % event.getEventName()
				break
		else:
			# no existing timer for this event (for/else: loop ran to completion)
			if not manual:
				menu = [(_("Add Timer"), 'CALLFUNC', self.ChoiceBoxCB, self.doRecordTimer), (_("Add AutoTimer"), 'CALLFUNC', self.ChoiceBoxCB, self.addAutoTimerSilent)]
				title = "%s?" % event.getEventName()
			else:
				newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, dirname=preferredTimerPath(), *parseEvent(event))
				self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry)
		if title:
			# anchor the choice box next to the selected row
			self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, title=title, list=menu, keys=['green', 'blue'], skin_name="RecordTimerQuestion")
			serviceref = eServiceReference(str(self['list'].getCurrent()[1]))
			posy = self['list'].getSelectionPosition(serviceref)
			self.ChoiceBoxDialog.instance.move(ePoint(posy[0]-self.ChoiceBoxDialog.instance.size().width(),self.instance.position().y()+posy[1]))
			self.showChoiceBoxDialog()
def recButtonPressed(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
self.RecordTimerQuestion()
def recButtonPressedLong(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
self.doZapTimer()
	def RemoveChoiceBoxCB(self, choice):
		# ChoiceBox callback for existing-timer actions; 'choice' is one of the
		# cb_func lambdas built in RecordTimerQuestion (takes one ignored argument).
		self.closeChoiceBoxDialog()
		if choice:
			choice(self)
def ChoiceBoxCB(self, choice):
self.closeChoiceBoxDialog()
if choice:
try:
choice()
except:
choice
	def showChoiceBoxDialog(self):
		# Reroute all input to the choice box while it is visible.
		self['okactions'].setEnabled(False)
		if self.has_key('epgcursoractions'):
			self['epgcursoractions'].setEnabled(False)
		self['colouractions'].setEnabled(False)
		self['recordingactions'].setEnabled(False)
		self['epgactions'].setEnabled(False)
		self["dialogactions"].setEnabled(True)
		self.ChoiceBoxDialog['actions'].execBegin()
		self.ChoiceBoxDialog.show()
		if self.has_key('input_actions'):
			self['input_actions'].setEnabled(False)
	def closeChoiceBoxDialog(self):
		# Tear down the choice box and give the keys back to the EPG screen.
		self["dialogactions"].setEnabled(False)
		if self.ChoiceBoxDialog:
			self.ChoiceBoxDialog['actions'].execEnd()
			self.session.deleteDialog(self.ChoiceBoxDialog)
		self['okactions'].setEnabled(True)
		if self.has_key('epgcursoractions'):
			self['epgcursoractions'].setEnabled(True)
		self['colouractions'].setEnabled(True)
		self['recordingactions'].setEnabled(True)
		self['epgactions'].setEnabled(True)
		if self.has_key('input_actions'):
			self['input_actions'].setEnabled(True)
	def doRecordTimer(self):
		# Instant record timer (zap=0).
		self.doInstantTimer(0)
	def doZapTimer(self):
		# Instant zap timer (zap=1).
		self.doInstantTimer(1)
	def doInstantTimer(self, zap):
		"""Create a record (zap=0) or zap (zap=1) timer for the selected event
		via the instant-record dialog; the result is fed to finishedAdd."""
		cur = self['list'].getCurrent()
		event = cur[0]
		serviceref = cur[1]
		if event is None:
			return
		eventid = event.getEventId()  # NOTE(review): eventid/refstr are computed but unused here
		refstr = serviceref.ref.toString()
		newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, *parseEvent(event))
		self.InstantRecordDialog = self.session.instantiateDialog(InstantRecordTimerEntry, newEntry, zap)
		retval = [True, self.InstantRecordDialog.retval()]
		self.session.deleteDialogWithCallback(self.finishedAdd, self.InstantRecordDialog, retval)
	def finishedAdd(self, answer):
		"""Timer-add callback: register the timer, resolve conflicts, update green key.

		answer is (confirmed, timer_entry).
		"""
		if answer[0]:
			entry = answer[1]
			# record() returns a list of conflicting timers, or None on success
			simulTimerList = self.session.nav.RecordTimer.record(entry)
			if simulTimerList is not None:
				# first try: shorten auto-increase timers to make room
				for x in simulTimerList:
					if x.setAutoincreaseEnd(entry):
						self.session.nav.RecordTimer.timeChanged(x)
				simulTimerList = self.session.nav.RecordTimer.record(entry)
				if simulTimerList is not None:
					if not entry.repeated and not config.recording.margin_before.value and not config.recording.margin_after.value and len(simulTimerList) > 1:
						# second try: back-to-back conflict — nudge begin/end by 30s
						change_time = False
						conflict_begin = simulTimerList[1].begin
						conflict_end = simulTimerList[1].end
						if conflict_begin == entry.end:
							entry.end -= 30
							change_time = True
						elif entry.begin == conflict_end:
							entry.begin += 30
							change_time = True
						if change_time:
							simulTimerList = self.session.nav.RecordTimer.record(entry)
					if simulTimerList is not None:
						# still conflicting: let the user resolve it interactively
						self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
			self["key_green"].setText(_("Change timer"))
			self.key_green_choice = self.REMOVE_TIMER
		else:
			self['key_green'].setText(_('Add Timer'))
			self.key_green_choice = self.ADD_TIMER
		self.refreshlist()
	def finishSanityCorrection(self, answer):
		# Conflict dialog finished: run the normal add-finished handling.
		self.finishedAdd(answer)
	def OK(self):
		"""OK (short press): finish a number zap, or zap per the configured OK action."""
		from InfoBar import InfoBar
		InfoBarInstance = InfoBar.instance
		if not InfoBarInstance.LongButtonPressed:
			if self.zapnumberstarted:
				self.dozumberzap()
			else:
				# NOTE(review): this ORs the OK-action setting of every EPG mode
				# instead of checking only the active mode's setting — confirm intent.
				if config.epgselection.graph_ok.value == 'Zap' or config.epgselection.enhanced_ok.value == 'Zap' or config.epgselection.infobar_ok.value == 'Zap' or config.epgselection.multi_ok.value == 'Zap':
					self.zapTo()
				if config.epgselection.graph_ok.value == 'Zap + Exit' or config.epgselection.enhanced_ok.value == 'Zap + Exit' or config.epgselection.infobar_ok.value == 'Zap + Exit' or config.epgselection.multi_ok.value == 'Zap + Exit':
					self.zap()
	def OKLong(self):
		"""OK (long press): finish a number zap, or zap per the configured OK-long action."""
		from InfoBar import InfoBar
		InfoBarInstance = InfoBar.instance
		if InfoBarInstance.LongButtonPressed:
			if self.zapnumberstarted:
				self.dozumberzap()
			else:
				# NOTE(review): ORs the setting of every EPG mode, like OK() above.
				if config.epgselection.graph_oklong.value == 'Zap' or config.epgselection.enhanced_oklong.value == 'Zap' or config.epgselection.infobar_oklong.value == 'Zap' or config.epgselection.multi_oklong.value == 'Zap':
					self.zapTo()
				if config.epgselection.graph_oklong.value == 'Zap + Exit' or config.epgselection.enhanced_oklong.value == 'Zap + Exit' or config.epgselection.infobar_oklong.value == 'Zap + Exit' or config.epgselection.multi_oklong.value == 'Zap + Exit':
					self.zap()
	def epgButtonPressed(self):
		# EPG key: open the single-service EPG for the selection.
		self.OpenSingleEPG()
def Info(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if not InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.value == 'Channel Info':
self.infoKeyPressed()
elif self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.value == 'Single EPG':
self.OpenSingleEPG()
else:
self.infoKeyPressed()
def InfoLong(self):
from InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance.LongButtonPressed:
if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.value == 'Channel Info':
self.infoKeyPressed()
elif self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.value == 'Single EPG':
self.OpenSingleEPG()
else:
self.OpenSingleEPG()
def applyButtonState(self, state):
if state == 0:
self['now_button'].hide()
self['now_button_sel'].hide()
self['next_button'].hide()
self['next_button_sel'].hide()
self['more_button'].hide()
self['more_button_sel'].hide()
self['now_text'].hide()
self['next_text'].hide()
self['more_text'].hide()
self['key_red'].setText('')
else:
if state == 1:
self['now_button_sel'].show()
self['now_button'].hide()
else:
self['now_button'].show()
self['now_button_sel'].hide()
if state == 2:
self['next_button_sel'].show()
self['next_button'].hide()
else:
self['next_button'].show()
self['next_button_sel'].hide()
if state == 3:
self['more_button_sel'].show()
self['more_button'].hide()
else:
self['more_button'].show()
self['more_button_sel'].hide()
	def onSelectionChanged(self):
		"""Selection changed: update event/service info, date label and the green key."""
		cur = self['list'].getCurrent()
		event = cur[0]
		self['Event'].newEvent(event)
		if cur[1] is None:
			self['Service'].newService(None)
		else:
			self['Service'].newService(cur[1].ref)
		if self.type == EPG_TYPE_MULTI:
			# update the now/next/more indicator buttons
			count = self['list'].getCurrentChangeCount()
			if self.ask_time != -1:
				self.applyButtonState(0)
			elif count > 1:
				self.applyButtonState(3)
			elif count > 0:
				self.applyButtonState(2)
			else:
				self.applyButtonState(1)
			datestr = ''
			if event is not None:
				now = time()
				beg = event.getBeginTime()
				nowTime = localtime(now)
				begTime = localtime(beg)
				# show a full date only when the event does not start today
				if nowTime[2] != begTime[2]:
					datestr = strftime(_('%A %e %b'), begTime)
				else:
					datestr = '%s' % _('Today')
			self['date'].setText(datestr)
		# no usable service or event: blank the green key and stop here
		if cur[1] is None or cur[1].getServiceName() == '':
			if self.key_green_choice != self.EMPTY:
				self['key_green'].setText('')
				self.key_green_choice = self.EMPTY
			return
		if event is None:
			if self.key_green_choice != self.EMPTY:
				self['key_green'].setText('')
				self.key_green_choice = self.EMPTY
			return
		serviceref = cur[1]
		eventid = event.getEventId()
		# compare only the first 11 reference fields (drops the trailing name part)
		refstr = ':'.join(serviceref.ref.toString().split(':')[:11])
		isRecordEvent = False
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
				isRecordEvent = True
				break
		if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
			self["key_green"].setText(_("Change timer"))
			self.key_green_choice = self.REMOVE_TIMER
		elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER:
			self['key_green'].setText(_('Add Timer'))
			self.key_green_choice = self.ADD_TIMER
		if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH):
			# keep the inline event view in sync with the new selection
			self.infoKeyPressed(True)
	def moveTimeLines(self, force = False):
		# Re-arm the timeline timer to fire at the next full minute, then redraw
		# the timeline headers and the "now" marker; invalidate the list to repaint.
		self.updateTimelineTimer.start((60 - int(time()) % 60) * 1000)
		self['timeline_text'].setEntries(self['list'], self['timeline_now'], self.time_lines, force)
		self['list'].l.invalidate()
def isPlayable(self):
current = ServiceReference(self.servicelist.getCurrentSelection())
return not current.ref.flags & (eServiceReference.isMarker | eServiceReference.isDirectory)
	def setServicelistSelection(self, bouquet, service):
		# Point the channel-list selection at the given bouquet/service,
		# re-entering the bouquet path if the list is rooted elsewhere.
		if self.servicelist:
			if self.servicelist.getRoot() != bouquet:
				self.servicelist.clearPath()
				self.servicelist.enterPath(self.servicelist.bouquet_root)
				self.servicelist.enterPath(bouquet)
			self.servicelist.setCurrentSelection(service)
	def closeEventViewDialog(self):
		# Tear down the inline event-view dialog if one is open.
		if self.eventviewDialog:
			self.eventviewDialog.hide()
			del self.eventviewDialog
			self.eventviewDialog = None
	def closeScreen(self):
		"""Exit the EPG; zap back to the start service when preview mode requires it."""
		if self.type == EPG_TYPE_SINGLE:
			self.close()
			return # stop and do not continue.
		# only zap back when the playing service changed while the EPG was open
		if self.session.nav.getCurrentlyPlayingServiceOrGroup() and self.StartRef and self.session.nav.getCurrentlyPlayingServiceOrGroup().toString() != self.StartRef.toString():
			if self.zapFunc and self.StartRef and self.StartBouquet:
				if ((self.type == EPG_TYPE_GRAPH and config.epgselection.graph_preview_mode.value) or
					(self.type == EPG_TYPE_MULTI and config.epgselection.multi_preview_mode.value) or
					(self.type in (EPG_TYPE_INFOBAR, EPG_TYPE_INFOBARGRAPH) and config.epgselection.infobar_preview_mode.value in ('1', '2')) or
					(self.type == EPG_TYPE_ENHANCED and config.epgselection.enhanced_preview_mode.value)):
					# '0:0:0:0:0:0:0:0:0' appears in file-playback references
					if '0:0:0:0:0:0:0:0:0' not in self.StartRef.toString():
						self.zapFunc(None, zapback = True)
					elif '0:0:0:0:0:0:0:0:0' in self.StartRef.toString():
						self.session.nav.playService(self.StartRef)
				else:
					self.zapFunc(None, False)
		if self.session.pipshown:
			# tear down any preview picture-in-picture we opened
			self.session.pipshown = False
			del self.session.pip
		self.closeEventViewDialog()
		self.close(True)
def zap(self):
if self.zapFunc:
self.zapSelectedService()
self.closeEventViewDialog()
self.close(True)
else:
self.closeEventViewDialog()
self.close()
	def zapSelectedService(self, prev=False):
		"""Zap or preview the selected service; manages the preview PiP for infobar modes.

		prev=True means "preview only" (do not leave the EPG).
		"""
		currservice = self.session.nav.getCurrentlyPlayingServiceReference() and str(self.session.nav.getCurrentlyPlayingServiceReference().toString()) or None
		if self.session.pipshown:
			self.prevch = self.session.pip.getCurrentService() and str(self.session.pip.getCurrentService().toString()) or None
		else:
			self.prevch = self.session.nav.getCurrentlyPlayingServiceReference() and str(self.session.nav.getCurrentlyPlayingServiceReference().toString()) or None
		lst = self["list"]
		count = lst.getCurrentChangeCount()
		# only zap when the "now" column is selected (change count 0)
		if count == 0:
			ref = lst.getCurrent()[1]
			if ref is not None:
				if (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH) and config.epgselection.infobar_preview_mode.value == '2':
					# preview mode '2': play the selection in a PiP window
					if not prev:
						# real zap requested: drop the PiP and zap for good
						if self.session.pipshown:
							self.session.pipshown = False
							del self.session.pip
						self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = False)
						return
					if not self.session.pipshown:
						self.session.pip = self.session.instantiateDialog(PictureInPicture)
						self.session.pip.show()
						self.session.pipshown = True
					# optional service substitution for PiP (pipServiceRelation map)
					n_service = self.pipServiceRelation.get(str(ref.ref), None)
					if n_service is not None:
						service = eServiceReference(n_service)
					else:
						service = ref.ref
					if self.currch == service.toString():
						# same channel selected again: leave PiP and really zap
						if self.session.pipshown:
							self.session.pipshown = False
							del self.session.pip
						self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = False)
						return
					if self.prevch != service.toString() and currservice != service.toString():
						self.session.pip.playService(service)
						self.currch = self.session.pip.getCurrentService() and str(self.session.pip.getCurrentService().toString())
				else:
					self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = prev)
					self.currch = self.session.nav.getCurrentlyPlayingServiceReference() and str(self.session.nav.getCurrentlyPlayingServiceReference().toString())
		self['list'].setCurrentlyPlaying(self.session.nav.getCurrentlyPlayingServiceOrGroup())
def zapTo(self):
    """Commit a zap to the selected service and close the EPG screen.

    If no channel change actually happened (no new current channel, or the
    target equals the previous channel), the pending preview zap is
    cancelled before closing.
    """
    # Record a resume point when leaving a media-playback service
    # (service reference containing '0:0:0:0:0:0:0:0:0').
    if self.session.nav.getCurrentlyPlayingServiceOrGroup() and '0:0:0:0:0:0:0:0:0' in self.session.nav.getCurrentlyPlayingServiceOrGroup().toString():
        from Screens.InfoBarGenerics import setResumePoint
        setResumePoint(self.session)
    if self.zapFunc:
        self.zapSelectedService(True)
        self.refreshTimer.start(2000)
    if not self.currch or self.currch == self.prevch:
        if self.zapFunc:
            # Nothing changed: cancel the preview zap before closing.
            self.zapFunc(None, False)
            self.closeEventViewDialog()
            self.close('close')
    else:
        self.closeEventViewDialog()
        self.close()
def keyNumberGlobal(self, number):
    """Handle a numeric key press.

    In the graphical EPG views the digits are shortcuts (zoom, page,
    "now", prime time, ...); in every other view the digits accumulate
    into a channel number for number zapping.
    """
    if self.type == EPG_TYPE_GRAPH:
        if number == 1:
            # 1: zoom in (one hour less), lower bound 60 minutes.
            timeperiod = int(config.epgselection.graph_prevtimeperiod.value)
            if timeperiod > 60:
                timeperiod -= 60
                self['list'].setEpoch(timeperiod)
                config.epgselection.graph_prevtimeperiod.setValue(timeperiod)
                self.moveTimeLines()
        elif number == 2:
            self.prevPage()
        elif number == 3:
            # 3: zoom out (one hour more), upper bound 300 minutes.
            timeperiod = int(config.epgselection.graph_prevtimeperiod.value)
            if timeperiod < 300:
                timeperiod += 60
                self['list'].setEpoch(timeperiod)
                config.epgselection.graph_prevtimeperiod.setValue(timeperiod)
                self.moveTimeLines()
        elif number == 4:
            self.updEvent(-2)
        elif number == 5:
            # 5: jump to "now", rounded down to the configured raster.
            now = time() - int(config.epg.histminutes.value) * 60
            self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines(True)
        elif number == 6:
            self.updEvent(+2)
        elif number == 7:
            # 7: toggle between the two configured row heights.
            if config.epgselection.graph_heightswitch.value:
                config.epgselection.graph_heightswitch.setValue(False)
            else:
                config.epgselection.graph_heightswitch.setValue(True)
            self['list'].setItemsPerPage()
            self['list'].fillGraphEPG(None)
            self.moveTimeLines()
        elif number == 8:
            self.nextPage()
        elif number == 9:
            # 9: jump to prime time; if prime time is already more than an
            # hour in the past, use tomorrow's.
            basetime = localtime(self['list'].getTimeBase())
            basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.graph_primetimehour.value), int(config.epgselection.graph_primetimemins.value), 0, basetime[6], basetime[7], basetime[8])
            self.ask_time = mktime(basetime)
            if self.ask_time + 3600 < time():
                self.ask_time += 86400
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines(True)
        elif number == 0:
            # 0: back to the top of the list and to "now".
            self.toTop()
            now = time() - int(config.epg.histminutes.value) * 60
            self.ask_time = now - now % (int(config.epgselection.graph_roundto.value) * 60)
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines()
    elif self.type == EPG_TYPE_INFOBARGRAPH:
        # Same shortcuts as the graph view but driven by the infobar
        # settings; note there is no height toggle on key 7 here.
        if number == 1:
            timeperiod = int(config.epgselection.infobar_prevtimeperiod.value)
            if timeperiod > 60:
                timeperiod -= 60
                self['list'].setEpoch(timeperiod)
                config.epgselection.infobar_prevtimeperiod.setValue(timeperiod)
                self.moveTimeLines()
        elif number == 2:
            self.prevPage()
        elif number == 3:
            timeperiod = int(config.epgselection.infobar_prevtimeperiod.value)
            if timeperiod < 300:
                timeperiod += 60
                self['list'].setEpoch(timeperiod)
                config.epgselection.infobar_prevtimeperiod.setValue(timeperiod)
                self.moveTimeLines()
        elif number == 4:
            self.updEvent(-2)
        elif number == 5:
            now = time() - int(config.epg.histminutes.value) * 60
            self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines(True)
        elif number == 6:
            self.updEvent(+2)
        elif number == 8:
            self.nextPage()
        elif number == 9:
            basetime = localtime(self['list'].getTimeBase())
            basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.infobar_primetimehour.value), int(config.epgselection.infobar_primetimemins.value), 0, basetime[6], basetime[7], basetime[8])
            self.ask_time = mktime(basetime)
            if self.ask_time + 3600 < time():
                self.ask_time += 86400
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines(True)
        elif number == 0:
            self.toTop()
            now = time() - int(config.epg.histminutes.value) * 60
            self.ask_time = now - now % (int(config.epgselection.infobar_roundto.value) * 60)
            self['list'].resetOffset()
            self['list'].fillGraphEPG(None, self.ask_time)
            self.moveTimeLines()
    else:
        # Any other view: collect digits for number zap; auto-zap after
        # 5 seconds of inactivity or once four digits were entered.
        self.zapnumberstarted = True
        self.NumberZapTimer.start(5000, True)
        if not self.NumberZapField:
            self.NumberZapField = str(number)
        else:
            self.NumberZapField += str(number)
        self.handleServiceName()
        self["number"].setText(self.zaptoservicename+'\n'+self.NumberZapField)
        self["number"].show()
        if len(self.NumberZapField) >= 4:
            self.dozumberzap()
def dozumberzap(self):
    """Finish number zap: stop collecting digits and zap to the resolved service."""
    self.zapnumberstarted = False
    self.numberEntered(self.service, self.bouquet)
def handleServiceName(self):
    """Resolve the digits typed so far into a service and cache its name.

    Updates ``self.service`` / ``self.bouquet`` via ``searchNumber`` and
    ``self.zaptoservicename`` for display in the number overlay.
    """
    if self.searchNumber:
        self.service, self.bouquet = self.searchNumber(int(self.NumberZapField))
        self.zaptoservicename = ServiceReference(self.service).getServiceName()
def numberEntered(self, service=None, bouquet=None):
    """Zap to *service* (within *bouquet*) if the number lookup found one."""
    if service is None:
        # Nothing matched the typed number; keep the current channel.
        return
    self.zapToNumber(service, bouquet)
def searchNumberHelper(self, serviceHandler, num, bouquet):
    """Return the service in *bouquet* whose channel number equals *num*.

    Iterates the bouquet's service list linearly; returns None when no
    service carries that channel number.
    """
    servicelist = serviceHandler.list(bouquet)
    if servicelist is not None:
        serviceIterator = servicelist.getNext()
        while serviceIterator.valid():
            if num == serviceIterator.getChannelNum():
                return serviceIterator
            serviceIterator = servicelist.getNext()
    return None
def searchNumber(self, number):
    """Find the service with channel number *number*.

    Searches the currently selected bouquet first; with multi-bouquet mode
    enabled the other bouquet directories under the bouquet root are
    scanned as well.  Returns ``(service, bouquet)``; ``service`` is None
    when nothing playable matched.
    """
    bouquet = self.servicelist.getRoot()
    service = None
    serviceHandler = eServiceCenter.getInstance()
    service = self.searchNumberHelper(serviceHandler, number, bouquet)
    if config.usage.multibouquet.value:
        # NOTE(review): this repeats the lookup done just above with the
        # same arguments — looks redundant but harmless; confirm upstream.
        service = self.searchNumberHelper(serviceHandler, number, bouquet)
        if service is None:
            # Walk every bouquet directory under the bouquet root.
            bouquet = self.servicelist.bouquet_root
            bouquetlist = serviceHandler.list(bouquet)
            if bouquetlist is not None:
                bouquet = bouquetlist.getNext()
                while bouquet.valid():
                    if bouquet.flags & eServiceReference.isDirectory:
                        service = self.searchNumberHelper(serviceHandler, number, bouquet)
                        if service is not None:
                            # Reject markers and directories unless it is a
                            # numbered marker.
                            playable = not service.flags & (eServiceReference.isMarker | eServiceReference.isDirectory) or service.flags & eServiceReference.isNumberedMarker
                            if not playable:
                                service = None
                            break
                        if config.usage.alternative_number_mode.value:
                            # Alternative numbering: only the first bouquet
                            # directory is considered.
                            break
                    bouquet = bouquetlist.getNext()
    return service, bouquet
def zapToNumber(self, service, bouquet):
    """Hide the number overlay and tune to the given service/bouquet.

    The selection is pushed into the service list (when a service was
    found) and the EPG screen is rebuilt via ``onCreate``.
    """
    self["number"].hide()
    self.NumberZapField = None
    self.CurrBouquet = bouquet
    self.CurrService = service
    if service is not None:
        self.setServicelistSelection(bouquet, service)
    self.onCreate()
class SingleEPG(EPGSelection):
    """Convenience wrapper that opens the EPG for a single service."""
    def __init__(self, session, service, EPGtype="single"):
        EPGSelection.__init__(self, session, service=service, EPGtype=EPGtype)
        # Reuse the generic EPGSelection skin.
        self.skinName = 'EPGSelection'
| gpl-2.0 |
seungjin/app5-seungjin-net.appspot.com | django/core/management/sql.py | 229 | 8259 | import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
def sql_create(app, style, connection):
    """Return a list of the CREATE TABLE SQL statements for the given app.

    ``app`` is the application's models module, ``style`` the output color
    style, and ``connection`` the database wrapper whose backend decides
    the SQL dialect.  Raises CommandError when the "dummy" backend is
    configured, since no dialect can be chosen then.
    """
    if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
        # This must be the "dummy" database backend, which means the user
        # hasn't set ENGINE for the database.
        # (Fixed typos: "databse" and "DATBASES" in the original message.)
        raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n"
                           "because you haven't specified the ENGINE setting for the database.\n"
                           "Edit your settings file and change DATABASES['default']['ENGINE'] to something like\n"
                           "'django.db.backends.postgresql' or 'django.db.backends.mysql'.")

    # Get installed models, so we generate REFERENCES right.
    # We trim models from the current app so that the sqlreset command does not
    # generate invalid SQL (leaving models out of known_models is harmless, so
    # we can be conservative).
    app_models = models.get_models(app, include_auto_created=True)
    final_output = []
    tables = connection.introspection.table_names()
    known_models = set([model for model in connection.introspection.installed_models(tables)
                        if model not in app_models])
    pending_references = {}

    for model in app_models:
        output, references = connection.creation.sql_create_model(model, style, known_models)
        final_output.extend(output)
        for refto, refs in references.items():
            pending_references.setdefault(refto, []).extend(refs)
            if refto in known_models:
                final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
        final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
        # Keep track of the fact that we've created the table for this model.
        known_models.add(model)

    # Handle references to tables that are from other apps
    # but don't exist physically.
    not_installed_models = set(pending_references.keys())
    if not_installed_models:
        alter_sql = []
        for model in not_installed_models:
            alter_sql.extend(['-- ' + sql for sql in
                              connection.creation.sql_for_pending_references(model, style, pending_references)])
        if alter_sql:
            final_output.append('-- The following references should be added but depend on non-existent tables:')
            final_output.extend(alter_sql)

    return final_output
def sql_delete(app, style, connection):
    """Return a list of the DROP TABLE SQL statements for the given app.

    Only tables that actually exist in the database are dropped, and the
    resulting list is reversed so dependent tables are dropped first.
    """
    # This should work even if a connection isn't available.
    try:
        cursor = connection.cursor()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any DB error still falls back to "no
        # cursor" (best-effort behavior preserved).
        cursor = None

    # Figure out which tables already exist.
    if cursor:
        table_names = connection.introspection.get_table_list(cursor)
    else:
        table_names = []

    output = []

    # Output DROP TABLE statements for standard application tables.
    to_delete = set()
    references_to_delete = {}
    app_models = models.get_models(app, include_auto_created=True)
    for model in app_models:
        if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
            # The table exists, so it needs to be dropped.
            opts = model._meta
            for f in opts.local_fields:
                if f.rel and f.rel.to not in to_delete:
                    references_to_delete.setdefault(f.rel.to, []).append((model, f))
            to_delete.add(model)

    for model in app_models:
        if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
            output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))

    # Close database connection explicitly, in case this output is being piped
    # directly into a database client, to avoid locking issues.
    if cursor:
        cursor.close()
        connection.close()

    return output[::-1]  # Reverse it, to deal with table dependencies.
def sql_reset(app, style, connection):
    "Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module."
    # This command breaks a lot and should be deprecated.
    import warnings
    warnings.warn(
        'This command has been deprecated. The command ``sqlflush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
        PendingDeprecationWarning
    )
    drop_statements = sql_delete(app, style, connection)
    create_statements = sql_all(app, style, connection)
    return drop_statements + create_statements
def sql_flush(style, connection, only_django=False):
    """
    Returns a list of the SQL statements used to flush the database.

    If only_django is True, then only table names that have associated Django
    models and are in INSTALLED_APPS will be included.
    """
    introspection = connection.introspection
    if only_django:
        table_list = introspection.django_table_names(only_existing=True)
    else:
        table_list = introspection.table_names()
    # The backend builds the actual TRUNCATE/DELETE statements, including
    # any sequence resets.
    return connection.ops.sql_flush(style, table_list, introspection.sequence_list())
def sql_custom(app, style, connection):
    """Return a list of the custom table-modifying SQL statements for the given app.

    Concatenates the per-model custom SQL located by
    ``custom_sql_for_model`` for every model in the app.  (A previously
    computed but unused ``app_dir`` local was removed; the per-model
    helper derives the app's ``sql`` directory itself.)
    """
    output = []
    for model in get_models(app):
        output.extend(custom_sql_for_model(model, style, connection))
    return output
def sql_indexes(app, style, connection):
    "Returns a list of the CREATE INDEX SQL statements for all models in the given app."
    # Index generation is delegated to the backend's creation module,
    # one model at a time.
    statements = []
    for model in models.get_models(app):
        statements += connection.creation.sql_indexes_for_model(model, style)
    return statements
def sql_all(app, style, connection):
    "Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
    # Order matters: tables first, then custom SQL, then indexes.
    statements = []
    for producer in (sql_create, sql_custom, sql_indexes):
        statements.extend(producer(app, style, connection))
    return statements
def custom_sql_for_model(model, style, connection):
    """Return the custom SQL statements for *model*.

    Collects any field ``post_create_sql`` for managed models, then reads
    ``<app>/sql/<model>.<backend>.sql`` and ``<app>/sql/<model>.sql`` if
    present, splitting their contents into individual statements.

    NOTE: Python 2 only (``ur""`` literal, ``str.decode``).
    """
    opts = model._meta
    # The app's ``sql`` directory sits next to its models module.
    app_dir = os.path.normpath(os.path.join(os.path.dirname(models.get_app(model._meta.app_label).__file__), 'sql'))
    output = []

    # Post-creation SQL should come before any initial SQL data is loaded.
    # However, this should not be done for models that are unmanaged or
    # for fields that are part of a parent model (via model inheritance).
    if opts.managed:
        post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
        for f in post_sql_fields:
            output.extend(f.post_create_sql(style, model._meta.db_table))

    # Some backends can't execute more than one SQL statement at a time,
    # so split into separate statements.
    statements = re.compile(r";[ \t]*$", re.M)

    # Find custom SQL, if it's available.
    backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
    sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
                 os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
    for sql_file in sql_files:
        if os.path.exists(sql_file):
            fp = open(sql_file, 'U')
            for statement in statements.split(fp.read().decode(settings.FILE_CHARSET)):
                # Remove any comments from the file.
                # NOTE(review): ``\Z`` inside a character class is treated
                # as a literal 'Z' by Python 2's re — likely intended as an
                # end-of-string anchor; confirm before changing.
                statement = re.sub(ur"--.*([\n\Z]|$)", "", statement)
                if statement.strip():
                    output.append(statement + u";")
            fp.close()
    return output
def emit_post_sync_signal(created_models, verbosity, interactive, db):
    """Send the ``post_syncdb`` signal for every installed application.

    NOTE: Python 2 only (``print`` statement).
    """
    # Emit the post_sync signal for every application.
    for app in models.get_apps():
        # The models module is ``<app_name>.models``; take the app name part.
        app_name = app.__name__.split('.')[-2]
        if verbosity >= 2:
            print "Running post-sync handlers for application", app_name
        models.signals.post_syncdb.send(sender=app, app=app,
            created_models=created_models, verbosity=verbosity,
            interactive=interactive, db=db)
| bsd-3-clause |
tsl143/zamboni | mkt/operators/management/commands/operator_user.py | 19 | 4594 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import DatabaseError
from mkt.carriers import CARRIER_CHOICE_DICT, CARRIER_MAP
from mkt.operators.models import OperatorPermission
from mkt.regions import REGIONS_CHOICES_ID_DICT, REGIONS_DICT
from mkt.users.models import UserProfile
# Shared ``--all`` flag for the ``remove`` subcommand: wipe every operator
# permission held by the given user instead of a single carrier/region pair.
all_opt = make_option('--all', action='store_true', dest='remove_all',
                      default=False,
                      help='Remove all operator permissions for the user')
class Command(BaseCommand):
    """Add, remove, or list operator permissions for a user.

    An operator permission ties a user to a (carrier, region) pair; the
    subcommands translate the human-readable slugs to internal ids and
    manipulate ``OperatorPermission`` rows accordingly.

    NOTE: Python 2 only (``except DatabaseError, e`` syntax).
    """
    args = '[command] [command_options]'
    option_list = BaseCommand.option_list + (all_opt,)
    help = ('Add, remove, or list operator permissions for a user:\n\n'
            'manage.py operator_user add <email> <carrier> <region>\n'
            'manage.py operator_user remove <email> <carrier> <region>\n'
            'manage.py operator_user remove --all <email>\n'
            'manage.py operator_user list <email>')

    def get_user(self, email):
        # Resolve an email address to a UserProfile or fail with a clear error.
        try:
            return UserProfile.objects.get(email=email)
        except UserProfile.DoesNotExist:
            raise CommandError('No user account for: %s' % email)

    def get_region_id(self, slug):
        # Translate a region slug to its numeric id.
        try:
            return REGIONS_DICT[slug].id
        except KeyError:
            raise CommandError('Invalid region: %r' % slug)

    def get_carrier_id(self, slug):
        # Translate a carrier slug to its numeric id.
        try:
            return CARRIER_MAP[slug].id
        except KeyError:
            raise CommandError('Invalid carrier: %r' % slug)

    def get_region_slug(self, id):
        # Reverse translation: numeric region id back to its slug.
        try:
            return REGIONS_CHOICES_ID_DICT[id].slug
        except KeyError:
            raise CommandError('Invalid region: %r' % id)

    def get_carrier_slug(self, id):
        # Reverse translation: numeric carrier id back to its slug.
        try:
            return CARRIER_CHOICE_DICT[id].slug
        except KeyError:
            raise CommandError('Invalid carrier: %r' % id)

    def get_ecr(self, args):
        # Extract the (email, carrier, region) triple after the subcommand.
        sliced = args[1:]
        if not len(sliced) == 3:
            raise CommandError('Did not pass <email> <carrier> <region>')
        return sliced

    def handle(self, *args, **options):
        # Dispatch on the first positional argument: add / remove / list.
        try:
            cmd = args[0]
        except IndexError:
            raise CommandError('No command passed.')
        if cmd == 'add':
            email, carrier, region = self.get_ecr(args)
            try:
                OperatorPermission.objects.create(
                    user=self.get_user(email),
                    region=self.get_region_id(region),
                    carrier=self.get_carrier_id(carrier))
                self.stdout.write('Created %s/%s permission for %s' % (
                    region, carrier, email))
            except DatabaseError, e:
                # Wrap DB failures in a CommandError but keep the original
                # exception args for debugging.
                exception = CommandError('Unable to grant permission.')
                exception.args = e.args
                raise exception
        elif cmd == 'remove':
            if options['remove_all']:
                # --all: delete every permission the user holds.
                user = self.get_user(args[1])
                qs = OperatorPermission.objects.filter(user=user)
                if len(qs):
                    qs.delete()
                    self.stdout.write('Removed all permissions for %s'
                                      % args[1])
                else:
                    raise CommandError('No permissions for %s' % args[1])
            else:
                email, carrier, region = self.get_ecr(args)
                qs = OperatorPermission.objects.filter(
                    user=self.get_user(email),
                    region=self.get_region_id(region),
                    carrier=self.get_carrier_id(carrier))
                if len(qs):
                    qs.delete()
                    self.stdout.write('Removed %s/%s permission for %s' % (
                        region, carrier, email))
                else:
                    raise CommandError('No %s/%s permission for %s' % (
                        region, carrier, email))
        elif cmd == 'list':
            user = self.get_user(args[1])
            qs = OperatorPermission.objects.filter(user=user)
            if len(qs):
                msg = ['Permissions for %s:' % args[1]]
                for item in qs:
                    msg.append('- %s/%s' % (
                        self.get_region_slug(item.region),
                        self.get_carrier_slug(item.carrier),
                    ))
                self.stdout.write('\n'.join(msg))
            else:
                self.stdout.write('No permissions for %s' % args[1])
        else:
            raise CommandError('Invalid command: %s' % cmd)
| bsd-3-clause |
eduNEXT/edunext-platform | lms/djangoapps/grades/rest_api/serializers.py | 4 | 3690 | """
API Serializers
"""
from collections import defaultdict
from rest_framework import serializers
# pylint: disable=abstract-method
class GradingPolicySerializer(serializers.Serializer):
    """
    Serializer for course grading policy.
    """
    assignment_type = serializers.CharField(source='type')
    count = serializers.IntegerField(source='min_count')
    dropped = serializers.IntegerField(source='drop_count')
    weight = serializers.FloatField()

    def to_representation(self, instance):
        """
        Return a representation of the grading policy.
        """
        # Backwards compatibility with the behavior of DRF v2: when the
        # grader dict is missing keys, DRF v2 defaulted them to None while
        # DRF v3 raises — so feed the parent a None-defaulting view of the
        # instance and materialize the result as a plain dict.
        tolerant_instance = defaultdict(lambda: None, instance)
        rendered = super(GradingPolicySerializer, self).to_representation(tolerant_instance)
        return dict(rendered)
class SectionBreakdownSerializer(serializers.Serializer):
    """
    Serializer for the `section_breakdown` portion of a gradebook entry.

    Each field is populated from the attribute/key of the same name on the
    serialized object (no ``source`` overrides are used).
    """
    attempted = serializers.BooleanField()
    category = serializers.CharField()
    label = serializers.CharField()
    module_id = serializers.CharField()
    percent = serializers.FloatField()
    score_earned = serializers.FloatField()
    score_possible = serializers.FloatField()
    subsection_name = serializers.CharField()
class StudentGradebookEntrySerializer(serializers.Serializer):
    """
    Serializer for student gradebook entry.

    Combines per-student identity fields with the per-subsection
    breakdown rows (see SectionBreakdownSerializer).
    """
    user_id = serializers.IntegerField()
    username = serializers.CharField()
    email = serializers.EmailField()
    # Optional: not every student has an externally supplied key.
    external_user_key = serializers.CharField(required=False)
    percent = serializers.FloatField()
    section_breakdown = SectionBreakdownSerializer(many=True)
class SubsectionGradeOverrideSerializer(serializers.Serializer):
    """
    Serializer for subsection grade override.
    """
    earned_all_override = serializers.FloatField()
    possible_all_override = serializers.FloatField()
    earned_graded_override = serializers.FloatField()
    possible_graded_override = serializers.FloatField()
class SubsectionGradeSerializer(serializers.Serializer):
    """
    Serializer for subsection grade.

    Mirrors SubsectionGradeOverrideSerializer but for the original
    (non-overridden) score values.
    """
    earned_all = serializers.FloatField()
    possible_all = serializers.FloatField()
    earned_graded = serializers.FloatField()
    possible_graded = serializers.FloatField()
class SubsectionGradeOverrideSimpleHistorySerializer(serializers.Serializer):
    """
    Serializer for subsection grade override history.

    Exposes the override values together with the history bookkeeping
    fields (who/when/why a change was made).
    """
    created = serializers.DateTimeField()
    grade_id = serializers.IntegerField()
    history_id = serializers.IntegerField()
    earned_all_override = serializers.FloatField()
    earned_graded_override = serializers.FloatField()
    override_reason = serializers.CharField()
    system = serializers.CharField()
    history_date = serializers.DateTimeField()
    history_type = serializers.CharField()
    history_user = serializers.CharField()
    history_user_id = serializers.IntegerField()
    id = serializers.IntegerField()
    possible_all_override = serializers.FloatField()
    possible_graded_override = serializers.FloatField()
class SubsectionGradeResponseSerializer(serializers.Serializer):
    """
    Serializer for subsection grade response.

    Top-level payload combining the original grade, the current override,
    and the full override history for one (user, subsection) pair.
    """
    subsection_id = serializers.CharField()
    user_id = serializers.IntegerField()
    course_id = serializers.CharField()
    original_grade = SubsectionGradeSerializer()
    override = SubsectionGradeOverrideSerializer()
    history = SubsectionGradeOverrideSimpleHistorySerializer(many=True)
| agpl-3.0 |
srsman/odoo | addons/base_gengo/res_company.py | 321 | 1890 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
    """Extend ``res.company`` with Gengo translation-service settings."""
    _name = "res.company"
    _inherit = "res.company"
    # Gengo API credentials plus per-company behavior flags; access is
    # restricted via the ``groups`` attribute on each field.
    _columns = {
        "gengo_private_key": fields.text("Gengo Private Key", copy=False, groups="base.group_system"),
        "gengo_public_key": fields.text("Gengo Public Key", copy=False, groups="base.group_user"),
        "gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo", groups="base.group_user"),
        "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo.", groups="base.group_user"),
        "gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
    }
    _defaults = {
        # Jobs are auto-approved unless a company opts out.
        "gengo_auto_approve": True,
    }
| agpl-3.0 |
bgxavier/neutron | neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py | 16 | 3610 | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.extensions import portbindings
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_l3 as test_l3
from neutron.plugins.ibm.common import constants
# Dotted path of the SDN-VE plugin exercised by these tests.
_plugin_name = ('neutron.plugins.ibm.'
                'sdnve_neutron_plugin.SdnvePluginV2')

# HTTP status code returned by every mocked controller call.
HTTP_OK = 200
class MockClient(object):
    """Stand-in for the SDN-VE controller REST client.

    Every CRUD entry point reports success (HTTP 200) with a dummy body,
    so the plugin under test never contacts a real controller.
    """

    @staticmethod
    def _ok():
        # Canonical "request succeeded" reply shared by all CRUD stubs.
        return (HTTP_OK, 'body')

    def sdnve_list(self, resource, **params):
        return self._ok()

    def sdnve_show(self, resource, specific, **params):
        return self._ok()

    def sdnve_create(self, resource, body):
        return self._ok()

    def sdnve_update(self, resource, specific, body=None):
        return self._ok()

    def sdnve_delete(self, resource, specific):
        return self._ok()

    def sdnve_get_tenant_byid(self, os_tenant_id):
        # Echo the id back, typed as an OpenFlow tenant.
        return (os_tenant_id, constants.TENANT_TYPE_OF)

    def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None):
        return os_tenant_id

    def sdnve_get_controller(self):
        # No controller address exists in the mocked environment.
        return None
class MockKeystoneClient(object):
    """Keystone client stub returning fixed tenant data."""

    # Fixed display name handed back for every tenant id.
    _TENANT_NAME = "tenant name"

    def __init__(self, **kwargs):
        # The real client takes auth parameters; the stub ignores them all.
        pass

    def get_tenant_type(self, id):
        # Every tenant looks like an OpenFlow tenant to the tests.
        return constants.TENANT_TYPE_OF

    def get_tenant_name(self, id):
        return self._TENANT_NAME
class IBMPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base fixture running the generic plugin tests against SDN-VE.

    Both REST clients are replaced with mocks before the plugin is
    instantiated, so no external service is ever contacted.
    """
    def setUp(self):
        # Patch the Keystone and controller client classes for the whole
        # fixture lifetime (strings use implicit concatenation).
        with mock.patch('neutron.plugins.ibm.sdnve_api.' 'KeystoneClient',
                        new=MockKeystoneClient),\
                mock.patch('neutron.plugins.ibm.sdnve_api.' 'Client',
                           new=MockClient):
            super(IBMPluginV2TestCase, self).setUp(plugin=_plugin_name)
class TestIBMBasicGet(test_plugin.TestBasicGet,
                      IBMPluginV2TestCase):
    """Run the generic basic-GET tests against the SDN-VE plugin."""
    pass
class TestIBMV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                            IBMPluginV2TestCase):
    """Run the generic HTTP-response tests against the SDN-VE plugin."""
    pass
class TestIBMNetworksV2(test_plugin.TestNetworksV2,
                        IBMPluginV2TestCase):
    """Run the generic network API tests against the SDN-VE plugin."""
    pass
class TestIBMPortsV2(test_plugin.TestPortsV2,
                     IBMPluginV2TestCase):
    """Run the generic port API tests against the SDN-VE plugin."""
    pass
class TestIBMSubnetsV2(test_plugin.TestSubnetsV2,
                       IBMPluginV2TestCase):
    """Run the generic subnet API tests against the SDN-VE plugin."""
    pass
class TestIBMPortBinding(IBMPluginV2TestCase,
                         test_bindings.PortBindingsTestCase):
    """Run the port-binding tests; SDN-VE reports the OVS VIF type."""
    VIF_TYPE = portbindings.VIF_TYPE_OVS
class IBMPluginRouterTestCase(test_l3.L3NatDBIntTestCase):
    """Run the L3/NAT DB integration tests against the SDN-VE plugin."""
    def setUp(self):
        # Same client mocking as IBMPluginV2TestCase, but for the L3 suite.
        with mock.patch('neutron.plugins.ibm.sdnve_api.' 'KeystoneClient',
                        new=MockKeystoneClient),\
                mock.patch('neutron.plugins.ibm.sdnve_api.' 'Client',
                           new=MockClient):
            super(IBMPluginRouterTestCase, self).setUp(plugin=_plugin_name)

    def test_floating_port_status_not_applicable(self):
        # The SDN-VE plugin mutates floating port status, so the generic
        # check does not apply here.
        self.skipTest('Plugin changes floating port status')
crazcalm/AngelHack_python34 | myenv/Lib/site-packages/werkzeug/testsuite/routing.py | 97 | 28826 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.routing
~~~~~~~~~~~~~~~~~~~~~~~~~~
Routing tests.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import routing as r
from werkzeug.wrappers import Response
from werkzeug.datastructures import ImmutableDict
from werkzeug.test import create_environ
class RoutingTestCase(WerkzeugTestCase):
def test_basic_routing(self):
    """Matching, strict-slash redirects, and query-arg preservation."""
    map = r.Map([
        r.Rule('/', endpoint='index'),
        r.Rule('/foo', endpoint='foo'),
        r.Rule('/bar/', endpoint='bar')
    ])
    adapter = map.bind('example.org', '/')
    assert adapter.match('/') == ('index', {})
    assert adapter.match('/foo') == ('foo', {})
    assert adapter.match('/bar/') == ('bar', {})
    # Missing trailing slash redirects; unknown path raises NotFound.
    self.assert_raises(r.RequestRedirect, lambda: adapter.match('/bar'))
    self.assert_raises(r.NotFound, lambda: adapter.match('/blub'))
    # The redirect URL honors the script root the map was bound to.
    adapter = map.bind('example.org', '/test')
    try:
        adapter.match('/bar')
    except r.RequestRedirect as e:
        assert e.new_url == 'http://example.org/test/bar/'
    else:
        self.fail('Expected request redirect')
    adapter = map.bind('example.org', '/')
    try:
        adapter.match('/bar')
    except r.RequestRedirect as e:
        assert e.new_url == 'http://example.org/bar/'
    else:
        self.fail('Expected request redirect')
    # Query args (dict or string form) survive the redirect.
    adapter = map.bind('example.org', '/')
    try:
        adapter.match('/bar', query_args={'aha': 'muhaha'})
    except r.RequestRedirect as e:
        assert e.new_url == 'http://example.org/bar/?aha=muhaha'
    else:
        self.fail('Expected request redirect')
    adapter = map.bind('example.org', '/')
    try:
        adapter.match('/bar', query_args='aha=muhaha')
    except r.RequestRedirect as e:
        assert e.new_url == 'http://example.org/bar/?aha=muhaha'
    else:
        self.fail('Expected request redirect')
    # When bound to an environ, the query string is taken from it.
    adapter = map.bind_to_environ(create_environ('/bar?foo=bar',
                                                 'http://example.org/'))
    try:
        adapter.match()
    except r.RequestRedirect as e:
        assert e.new_url == 'http://example.org/bar/?foo=bar'
    else:
        self.fail('Expected request redirect')
def test_environ_defaults(self):
    """match() with no argument falls back to the environ's PATH_INFO."""
    environ = create_environ("/foo")
    self.assert_strict_equal(environ["PATH_INFO"], '/foo')
    m = r.Map([r.Rule("/foo", endpoint="foo"), r.Rule("/bar", endpoint="bar")])
    a = m.bind_to_environ(environ)
    self.assert_strict_equal(a.match("/foo"), ('foo', {}))
    self.assert_strict_equal(a.match(), ('foo', {}))
    self.assert_strict_equal(a.match("/bar"), ('bar', {}))
    self.assert_raises(r.NotFound, a.match, "/bars")
def test_environ_nonascii_pathinfo(self):
    """Non-ASCII PATH_INFO values match non-ASCII rules correctly."""
    environ = create_environ(u'/лошадь')
    m = r.Map([
        r.Rule(u'/', endpoint='index'),
        r.Rule(u'/лошадь', endpoint='horse')
    ])
    a = m.bind_to_environ(environ)
    self.assert_strict_equal(a.match(u'/'), ('index', {}))
    self.assert_strict_equal(a.match(u'/лошадь'), ('horse', {}))
    self.assert_raises(r.NotFound, a.match, u'/барсук')
def test_basic_building(self):
    """URL building for the standard converters, subdomains, and script roots."""
    map = r.Map([
        r.Rule('/', endpoint='index'),
        r.Rule('/foo', endpoint='foo'),
        r.Rule('/bar/<baz>', endpoint='bar'),
        r.Rule('/bar/<int:bazi>', endpoint='bari'),
        r.Rule('/bar/<float:bazf>', endpoint='barf'),
        r.Rule('/bar/<path:bazp>', endpoint='barp'),
        r.Rule('/hehe', endpoint='blah', subdomain='blah')
    ])
    # Bound to the 'blah' subdomain: foreign-subdomain rules build as
    # external URLs, same-subdomain rules build as paths.
    adapter = map.bind('example.org', '/', subdomain='blah')
    assert adapter.build('index', {}) == 'http://example.org/'
    assert adapter.build('foo', {}) == 'http://example.org/foo'
    assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/bar/blub'
    assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/bar/50'
    assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/bar/0.815'
    assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/bar/la/di'
    assert adapter.build('blah', {}) == '/hehe'
    self.assert_raises(r.BuildError, lambda: adapter.build('urks'))
    # With a script root, built URLs are prefixed accordingly.
    adapter = map.bind('example.org', '/test', subdomain='blah')
    assert adapter.build('index', {}) == 'http://example.org/test/'
    assert adapter.build('foo', {}) == 'http://example.org/test/foo'
    assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/test/bar/blub'
    assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/test/bar/50'
    assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/test/bar/0.815'
    assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/test/bar/la/di'
    assert adapter.build('blah', {}) == '/test/hehe'
def test_defaults(self):
    """Rules with defaults: default values fold back to the canonical URL."""
    map = r.Map([
        r.Rule('/foo/', defaults={'page': 1}, endpoint='foo'),
        r.Rule('/foo/<int:page>', endpoint='foo')
    ])
    adapter = map.bind('example.org', '/')
    assert adapter.match('/foo/') == ('foo', {'page': 1})
    # /foo/1 redirects to the canonical /foo/.
    self.assert_raises(r.RequestRedirect, lambda: adapter.match('/foo/1'))
    assert adapter.match('/foo/2') == ('foo', {'page': 2})
    assert adapter.build('foo', {}) == '/foo/'
    assert adapter.build('foo', {'page': 1}) == '/foo/'
    assert adapter.build('foo', {'page': 2}) == '/foo/2'
def test_greedy(self):
    """Path converters match greedily across slashes."""
    map = r.Map([
        r.Rule('/foo', endpoint='foo'),
        r.Rule('/<path:bar>', endpoint='bar'),
        r.Rule('/<path:bar>/<path:blub>', endpoint='bar')
    ])
    adapter = map.bind('example.org', '/')
    assert adapter.match('/foo') == ('foo', {})
    assert adapter.match('/blub') == ('bar', {'bar': 'blub'})
    assert adapter.match('/he/he') == ('bar', {'bar': 'he', 'blub': 'he'})
    assert adapter.build('foo', {}) == '/foo'
    assert adapter.build('bar', {'bar': 'blub'}) == '/blub'
    assert adapter.build('bar', {'bar': 'blub', 'blub': 'bar'}) == '/blub/bar'
def test_path(self):
    """Wiki-style rule set mixing static, int, and path converters."""
    map = r.Map([
        r.Rule('/', defaults={'name': 'FrontPage'}, endpoint='page'),
        r.Rule('/Special', endpoint='special'),
        r.Rule('/<int:year>', endpoint='year'),
        r.Rule('/<path:name>', endpoint='page'),
        r.Rule('/<path:name>/edit', endpoint='editpage'),
        r.Rule('/<path:name>/silly/<path:name2>', endpoint='sillypage'),
        r.Rule('/<path:name>/silly/<path:name2>/edit', endpoint='editsillypage'),
        r.Rule('/Talk:<path:name>', endpoint='talk'),
        r.Rule('/User:<username>', endpoint='user'),
        r.Rule('/User:<username>/<path:name>', endpoint='userpage'),
        r.Rule('/Files/<path:file>', endpoint='files'),
    ])
    adapter = map.bind('example.org', '/')
    assert adapter.match('/') == ('page', {'name':'FrontPage'})
    # The default value folds /FrontPage back to the canonical /.
    self.assert_raises(r.RequestRedirect, lambda: adapter.match('/FrontPage'))
    assert adapter.match('/Special') == ('special', {})
    assert adapter.match('/2007') == ('year', {'year':2007})
    assert adapter.match('/Some/Page') == ('page', {'name':'Some/Page'})
    assert adapter.match('/Some/Page/edit') == ('editpage', {'name':'Some/Page'})
    assert adapter.match('/Foo/silly/bar') == ('sillypage', {'name':'Foo', 'name2':'bar'})
    assert adapter.match('/Foo/silly/bar/edit') == ('editsillypage', {'name':'Foo', 'name2':'bar'})
    assert adapter.match('/Talk:Foo/Bar') == ('talk', {'name':'Foo/Bar'})
    assert adapter.match('/User:thomas') == ('user', {'username':'thomas'})
    assert adapter.match('/User:thomas/projects/werkzeug') == \
        ('userpage', {'username':'thomas', 'name':'projects/werkzeug'})
    assert adapter.match('/Files/downloads/werkzeug/0.2.zip') == \
        ('files', {'file':'downloads/werkzeug/0.2.zip'})
    def test_dispatch(self):
        """Adapter.dispatch() invokes the view function and can optionally
        convert HTTPExceptions (e.g. NotFound) into regular responses."""
        env = create_environ('/')
        map = r.Map([
            r.Rule('/', endpoint='root'),
            r.Rule('/foo/', endpoint='foo')
        ])
        adapter = map.bind_to_environ(env)
        raise_this = None
        def view_func(endpoint, values):
            # Reads raise_this from the enclosing scope at call time, so the
            # rebinding below changes the behaviour of later dispatches.
            if raise_this is not None:
                raise raise_this
            return Response(repr((endpoint, values)))
        dispatch = lambda p, q=False: Response.force_type(adapter.dispatch(view_func, p,
                                                          catch_http_exceptions=q), env)
        assert dispatch('/').data == b"('root', {})"
        # '/foo' redirects to the canonical '/foo/'.
        assert dispatch('/foo').status_code == 301
        raise_this = r.NotFound()
        self.assert_raises(r.NotFound, lambda: dispatch('/bar'))
        # With catch_http_exceptions=True the NotFound becomes a 404 response.
        assert dispatch('/bar', True).status_code == 404
    def test_http_host_before_server_name(self):
        """HTTP_HOST takes precedence over SERVER_NAME when binding to an
        environ, both for matching and for URL building."""
        env = {
            'HTTP_HOST': 'wiki.example.com',
            'SERVER_NAME': 'web0.example.com',
            'SERVER_PORT': '80',
            'SCRIPT_NAME': '',
            'PATH_INFO': '',
            'REQUEST_METHOD': 'GET',
            'wsgi.url_scheme': 'http'
        }
        map = r.Map([r.Rule('/', endpoint='index', subdomain='wiki')])
        adapter = map.bind_to_environ(env, server_name='example.com')
        assert adapter.match('/') == ('index', {})
        assert adapter.build('index', force_external=True) == 'http://wiki.example.com/'
        assert adapter.build('index') == '/'
        # From a different subdomain the built URL becomes external automatically.
        env['HTTP_HOST'] = 'admin.example.com'
        adapter = map.bind_to_environ(env, server_name='example.com')
        assert adapter.build('index') == 'http://wiki.example.com/'
def test_adapter_url_parameter_sorting(self):
map = r.Map([r.Rule('/', endpoint='index')], sort_parameters=True,
sort_key=lambda x: x[1])
adapter = map.bind('localhost', '/')
assert adapter.build('index', {'x': 20, 'y': 10, 'z': 30},
force_external=True) == 'http://localhost/?y=10&x=20&z=30'
def test_request_direct_charset_bug(self):
map = r.Map([r.Rule(u'/öäü/')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/öäü')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/%C3%B6%C3%A4%C3%BC/'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}),
r.Rule(u'/foo/<int:bar>')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/foo'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default_subdomain(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}, subdomain='test'),
r.Rule(u'/foo/<int:bar>', subdomain='other')])
adapter = map.bind('localhost', '/', subdomain='other')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://test.localhost/foo'
else:
self.fail('expected request redirect exception')
def test_adapter_match_return_rule(self):
rule = r.Rule('/foo/', endpoint='foo')
map = r.Map([rule])
adapter = map.bind('localhost', '/')
assert adapter.match('/foo/', return_rule=True) == (rule, {})
    def test_server_name_interpolation(self):
        """The subdomain is derived by matching the environ host against the
        provided server_name; a mismatching server_name yields '<invalid>'."""
        server_name = 'example.invalid'
        map = r.Map([r.Rule('/', endpoint='index'),
                     r.Rule('/', endpoint='alt', subdomain='alt')])
        env = create_environ('/', 'http://%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name=server_name)
        assert adapter.match() == ('index', {})
        env = create_environ('/', 'http://alt.%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name=server_name)
        assert adapter.match() == ('alt', {})
        # server_name that is not a suffix of the environ host at all:
        env = create_environ('/', 'http://%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name='foo')
        assert adapter.subdomain == '<invalid>'
    def test_rule_emptying(self):
        """Rule.empty() produces a copy whose state matches the original but
        whose mutable attributes (methods, defaults) are not shared."""
        rule = r.Rule('/foo', {'meh': 'muh'}, 'x', ['POST'],
                      False, 'x', True, None)
        rule2 = rule.empty()
        assert rule.__dict__ == rule2.__dict__
        # Mutating the original's method set must not affect the copy.
        rule.methods.add('GET')
        assert rule.__dict__ != rule2.__dict__
        rule.methods.discard('GET')
        # Same for the defaults mapping.
        rule.defaults['meh'] = 'aha'
        assert rule.__dict__ != rule2.__dict__
    def test_rule_templates(self):
        """RuleTemplate substitutes $app/${app} placeholders inside Submount,
        EndpointPrefix and Subdomain factories for each instantiation."""
        testcase = r.RuleTemplate(
            [ r.Submount('/test/$app',
              [ r.Rule('/foo/', endpoint='handle_foo')
              , r.Rule('/bar/', endpoint='handle_bar')
              , r.Rule('/baz/', endpoint='handle_baz')
              ]),
              r.EndpointPrefix('${app}',
              [ r.Rule('/${app}-blah', endpoint='bar')
              , r.Rule('/${app}-meh', endpoint='baz')
              ]),
              r.Subdomain('$app',
              [ r.Rule('/blah', endpoint='x_bar')
              , r.Rule('/meh', endpoint='x_baz')
              ])
            ])
        url_map = r.Map(
            [ testcase(app='test1')
            , testcase(app='test2')
            , testcase(app='test3')
            , testcase(app='test4')
            ])
        # Sorted (rule, subdomain, endpoint) triples over all expansions.
        out = sorted([(x.rule, x.subdomain, x.endpoint)
                      for x in url_map.iter_rules()])
        assert out == ([
            ('/blah', 'test1', 'x_bar'),
            ('/blah', 'test2', 'x_bar'),
            ('/blah', 'test3', 'x_bar'),
            ('/blah', 'test4', 'x_bar'),
            ('/meh', 'test1', 'x_baz'),
            ('/meh', 'test2', 'x_baz'),
            ('/meh', 'test3', 'x_baz'),
            ('/meh', 'test4', 'x_baz'),
            ('/test/test1/bar/', '', 'handle_bar'),
            ('/test/test1/baz/', '', 'handle_baz'),
            ('/test/test1/foo/', '', 'handle_foo'),
            ('/test/test2/bar/', '', 'handle_bar'),
            ('/test/test2/baz/', '', 'handle_baz'),
            ('/test/test2/foo/', '', 'handle_foo'),
            ('/test/test3/bar/', '', 'handle_bar'),
            ('/test/test3/baz/', '', 'handle_baz'),
            ('/test/test3/foo/', '', 'handle_foo'),
            ('/test/test4/bar/', '', 'handle_bar'),
            ('/test/test4/baz/', '', 'handle_baz'),
            ('/test/test4/foo/', '', 'handle_foo'),
            ('/test1-blah', '', 'test1bar'),
            ('/test1-meh', '', 'test1baz'),
            ('/test2-blah', '', 'test2bar'),
            ('/test2-meh', '', 'test2baz'),
            ('/test3-blah', '', 'test3bar'),
            ('/test3-meh', '', 'test3baz'),
            ('/test4-blah', '', 'test4bar'),
            ('/test4-meh', '', 'test4baz')
        ])
def test_non_string_parts(self):
m = r.Map([
r.Rule('/<foo>', endpoint='foo')
])
a = m.bind('example.com')
self.assert_equal(a.build('foo', {'foo': 42}), '/42')
    def test_complex_routing_rules(self):
        """End-to-end match/build round trips over a map mixing int, string
        and greedy path converters, defaults and overlapping prefixes."""
        m = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/<int:blub>', endpoint='an_int'),
            r.Rule('/<blub>', endpoint='a_string'),
            r.Rule('/foo/', endpoint='nested'),
            r.Rule('/foobar/', endpoint='nestedbar'),
            r.Rule('/foo/<path:testing>/', endpoint='nested_show'),
            r.Rule('/foo/<path:testing>/edit', endpoint='nested_edit'),
            r.Rule('/users/', endpoint='users', defaults={'page': 1}),
            r.Rule('/users/page/<int:page>', endpoint='users'),
            r.Rule('/foox', endpoint='foox'),
            r.Rule('/<path:bar>/<path:blub>', endpoint='barx_path_path')
        ])
        a = m.bind('example.com')
        assert a.match('/') == ('index', {})
        # The int rule wins over the plain string rule for numeric input.
        assert a.match('/42') == ('an_int', {'blub': 42})
        assert a.match('/blub') == ('a_string', {'blub': 'blub'})
        assert a.match('/foo/') == ('nested', {})
        assert a.match('/foobar/') == ('nestedbar', {})
        assert a.match('/foo/1/2/3/') == ('nested_show', {'testing': '1/2/3'})
        assert a.match('/foo/1/2/3/edit') == ('nested_edit', {'testing': '1/2/3'})
        assert a.match('/users/') == ('users', {'page': 1})
        assert a.match('/users/page/2') == ('users', {'page': 2})
        assert a.match('/foox') == ('foox', {})
        # The first path converter matches minimally here: bar gets '1'.
        assert a.match('/1/2/3') == ('barx_path_path', {'bar': '1', 'blub': '2/3'})
        assert a.build('index') == '/'
        assert a.build('an_int', {'blub': 42}) == '/42'
        assert a.build('a_string', {'blub': 'test'}) == '/test'
        assert a.build('nested') == '/foo/'
        assert a.build('nestedbar') == '/foobar/'
        assert a.build('nested_show', {'testing': '1/2/3'}) == '/foo/1/2/3/'
        assert a.build('nested_edit', {'testing': '1/2/3'}) == '/foo/1/2/3/edit'
        assert a.build('users', {'page': 1}) == '/users/'
        assert a.build('users', {'page': 2}) == '/users/page/2'
        assert a.build('foox') == '/foox'
        assert a.build('barx_path_path', {'bar': '1', 'blub': '2/3'}) == '/1/2/3'
    def test_default_converters(self):
        """A Map subclass may extend default_converters via a copy without
        mutating the immutable registry on the base class."""
        class MyMap(r.Map):
            default_converters = r.Map.default_converters.copy()
            default_converters['foo'] = r.UnicodeConverter
        assert isinstance(r.Map.default_converters, ImmutableDict)
        m = MyMap([
            r.Rule('/a/<foo:a>', endpoint='a'),
            r.Rule('/b/<foo:b>', endpoint='b'),
            r.Rule('/c/<c>', endpoint='c')
        ], converters={'bar': r.UnicodeConverter})
        a = m.bind('example.org', '/')
        assert a.match('/a/1') == ('a', {'a': '1'})
        assert a.match('/b/2') == ('b', {'b': '2'})
        assert a.match('/c/3') == ('c', {'c': '3'})
        # The subclass's addition must not leak back into the base class.
        assert 'foo' not in r.Map.default_converters
def test_build_append_unknown(self):
map = r.Map([
r.Rule('/bar/<float:bazf>', endpoint='barf')
])
adapter = map.bind('example.org', '/', subdomain='blah')
assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0}) == \
'http://example.org/bar/0.815?bif=1.0'
assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0},
append_unknown=False) == 'http://example.org/bar/0.815'
    def test_method_fallback(self):
        """When one endpoint is served by several rules with different
        methods, build() picks the rule matching the requested method
        (defaulting to GET)."""
        map = r.Map([
            r.Rule('/', endpoint='index', methods=['GET']),
            r.Rule('/<name>', endpoint='hello_name', methods=['GET']),
            r.Rule('/select', endpoint='hello_select', methods=['POST']),
            r.Rule('/search_get', endpoint='search', methods=['GET']),
            r.Rule('/search_post', endpoint='search', methods=['POST'])
        ])
        adapter = map.bind('example.com')
        assert adapter.build('index') == '/'
        assert adapter.build('index', method='GET') == '/'
        assert adapter.build('hello_name', {'name': 'foo'}) == '/foo'
        assert adapter.build('hello_select') == '/select'
        assert adapter.build('hello_select', method='POST') == '/select'
        assert adapter.build('search') == '/search_get'
        assert adapter.build('search', method='GET') == '/search_get'
        assert adapter.build('search', method='POST') == '/search_post'
def test_implicit_head(self):
url_map = r.Map([
r.Rule('/get', methods=['GET'], endpoint='a'),
r.Rule('/post', methods=['POST'], endpoint='b')
])
adapter = url_map.bind('example.org')
assert adapter.match('/get', method='HEAD') == ('a', {})
self.assert_raises(r.MethodNotAllowed, adapter.match,
'/post', method='HEAD')
def test_protocol_joining_bug(self):
m = r.Map([r.Rule('/<foo>', endpoint='x')])
a = m.bind('example.org')
assert a.build('x', {'foo': 'x:y'}) == '/x:y'
assert a.build('x', {'foo': 'x:y'}, force_external=True) == \
'http://example.org/x:y'
def test_allowed_methods_querying(self):
m = r.Map([r.Rule('/<foo>', methods=['GET', 'HEAD']),
r.Rule('/foo', methods=['POST'])])
a = m.bind('example.org')
assert sorted(a.allowed_methods('/foo')) == ['GET', 'HEAD', 'POST']
def test_external_building_with_port(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
adapter = map.bind('example.org:5000', '/')
built_url = adapter.build('index', {}, force_external=True)
assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
adapter = map.bind_to_environ(
create_environ('/', 'http://example.org:5000/'),
server_name="example.org:5000"
)
built_url = adapter.build('index', {}, force_external=True)
assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ_wrong_servername(self):
map = r.Map([
r.Rule('/', endpoint='index'),
])
environ = create_environ('/', 'http://example.org:5000/')
adapter = map.bind_to_environ(environ, server_name="example.org")
assert adapter.subdomain == '<invalid>'
    def test_converter_parser(self):
        """parse_converter_args understands positional args, keyword args,
        trailing commas, Python literals and quoted strings."""
        args, kwargs = r.parse_converter_args(u'test, a=1, b=3.0')
        assert args == ('test',)
        assert kwargs == {'a': 1, 'b': 3.0 }
        args, kwargs = r.parse_converter_args('')
        assert not args and not kwargs
        args, kwargs = r.parse_converter_args('a, b, c,')
        assert args == ('a', 'b', 'c')
        assert not kwargs
        args, kwargs = r.parse_converter_args('True, False, None')
        assert args == (True, False, None)
        args, kwargs = r.parse_converter_args('"foo", u"bar"')
        assert args == ('foo', 'bar')
    def test_alias_redirects(self):
        """Alias rules redirect to their canonical rule, preserving defaults
        and any query arguments; build() never emits the alias form."""
        m = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/index.html', endpoint='index', alias=True),
            r.Rule('/users/', defaults={'page': 1}, endpoint='users'),
            r.Rule('/users/index.html', defaults={'page': 1}, alias=True,
                   endpoint='users'),
            r.Rule('/users/page/<int:page>', endpoint='users'),
            r.Rule('/users/page-<int:page>.html', alias=True, endpoint='users'),
        ])
        a = m.bind('example.com')
        # Helper: matching `path` must raise a redirect to `new_url`.
        def ensure_redirect(path, new_url, args=None):
            try:
                a.match(path, query_args=args)
            except r.RequestRedirect as e:
                assert e.new_url == 'http://example.com' + new_url
            else:
                assert False, 'expected redirect'
        ensure_redirect('/index.html', '/')
        ensure_redirect('/users/index.html', '/users/')
        ensure_redirect('/users/page-2.html', '/users/page/2')
        ensure_redirect('/users/page-1.html', '/users/')
        ensure_redirect('/users/page-1.html', '/users/?foo=bar', {'foo': 'bar'})
        assert a.build('index') == '/'
        assert a.build('users', {'page': 1}) == '/users/'
        assert a.build('users', {'page': 2}) == '/users/page/2'
    def test_double_defaults(self):
        """Two pairs of rules sharing one endpoint, disambiguated only by
        their defaults; checked with and without a mount prefix."""
        for prefix in '', '/aaa':
            m = r.Map([
                r.Rule(prefix + '/', defaults={'foo': 1, 'bar': False}, endpoint='x'),
                r.Rule(prefix + '/<int:foo>', defaults={'bar': False}, endpoint='x'),
                r.Rule(prefix + '/bar/', defaults={'foo': 1, 'bar': True}, endpoint='x'),
                r.Rule(prefix + '/bar/<int:foo>', defaults={'bar': True}, endpoint='x')
            ])
            a = m.bind('example.com')
            assert a.match(prefix + '/') == ('x', {'foo': 1, 'bar': False})
            assert a.match(prefix + '/2') == ('x', {'foo': 2, 'bar': False})
            assert a.match(prefix + '/bar/') == ('x', {'foo': 1, 'bar': True})
            assert a.match(prefix + '/bar/2') == ('x', {'foo': 2, 'bar': True})
            # Building with foo equal to the default collapses to the short rule.
            assert a.build('x', {'foo': 1, 'bar': False}) == prefix + '/'
            assert a.build('x', {'foo': 2, 'bar': False}) == prefix + '/2'
            assert a.build('x', {'bar': False}) == prefix + '/'
            assert a.build('x', {'foo': 1, 'bar': True}) == prefix + '/bar/'
            assert a.build('x', {'foo': 2, 'bar': True}) == prefix + '/bar/2'
            assert a.build('x', {'bar': True}) == prefix + '/bar/'
    def test_host_matching(self):
        """With host_matching=True, rules carry full host patterns and the
        host placeholder is extracted into the match values."""
        m = r.Map([
            r.Rule('/', endpoint='index', host='www.<domain>'),
            r.Rule('/', endpoint='files', host='files.<domain>'),
            r.Rule('/foo/', defaults={'page': 1}, host='www.<domain>', endpoint='x'),
            r.Rule('/<int:page>', host='files.<domain>', endpoint='x')
        ], host_matching=True)
        a = m.bind('www.example.com')
        assert a.match('/') == ('index', {'domain': 'example.com'})
        assert a.match('/foo/') == ('x', {'domain': 'example.com', 'page': 1})
        try:
            a.match('/foo')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://www.example.com/foo/'
        else:
            assert False, 'expected redirect'
        a = m.bind('files.example.com')
        assert a.match('/') == ('files', {'domain': 'example.com'})
        assert a.match('/2') == ('x', {'domain': 'example.com', 'page': 2})
        try:
            a.match('/1')
        except r.RequestRedirect as e:
            # page=1 equals the default of the www rule, so the redirect
            # crosses hosts to the canonical www URL.
            assert e.new_url == 'http://www.example.com/foo/'
        else:
            assert False, 'expected redirect'
    def test_server_name_casing(self):
        """Host/subdomain comparison is case-insensitive; a completely
        unrelated host (bare IP) does not match at all."""
        m = r.Map([
            r.Rule('/', endpoint='index', subdomain='foo')
        ])
        env = create_environ()
        env['SERVER_NAME'] = env['HTTP_HOST'] = 'FOO.EXAMPLE.COM'
        a = m.bind_to_environ(env, server_name='example.com')
        assert a.match('/') == ('index', {})
        env = create_environ()
        env['SERVER_NAME'] = '127.0.0.1'
        env['SERVER_PORT'] = '5000'
        del env['HTTP_HOST']
        a = m.bind_to_environ(env, server_name='example.com')
        try:
            a.match()
        except r.NotFound:
            pass
        else:
            assert False, 'Expected not found exception'
def test_redirect_request_exception_code(self):
exc = r.RequestRedirect('http://www.google.com/')
exc.code = 307
env = create_environ()
self.assert_strict_equal(exc.get_response(env).status_code, exc.code)
def test_unicode_rules(self):
m = r.Map([
r.Rule(u'/войти/', endpoint='enter'),
r.Rule(u'/foo+bar/', endpoint='foobar')
])
a = m.bind(u'☃.example.com')
try:
a.match(u'/войти')
except r.RequestRedirect as e:
self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
'%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
endpoint, values = a.match(u'/войти/')
self.assert_strict_equal(endpoint, 'enter')
self.assert_strict_equal(values, {})
try:
a.match(u'/foo+bar')
except r.RequestRedirect as e:
self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
'foo+bar/')
endpoint, values = a.match(u'/foo+bar/')
self.assert_strict_equal(endpoint, 'foobar')
self.assert_strict_equal(values, {})
url = a.build('enter', {}, force_external=True)
self.assert_strict_equal(url, 'http://xn--n3h.example.com/%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
url = a.build('foobar', {}, force_external=True)
self.assert_strict_equal(url, 'http://xn--n3h.example.com/foo+bar/')
    def test_map_repr(self):
        """repr(Map) lists rules in the map's internal ordering, which need
        not match insertion order (note '/woop' precedes '/wat' here)."""
        m = r.Map([
            r.Rule(u'/wat', endpoint='enter'),
            r.Rule(u'/woop', endpoint='foobar')
        ])
        rv = repr(m)
        self.assert_strict_equal(rv,
            "Map([<Rule '/woop' -> foobar>, <Rule '/wat' -> enter>])")
def suite():
    """Build the unittest suite containing all routing test cases."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(RoutingTestCase))
    return tests
| mit |
ar7z1/ansible | lib/ansible/modules/network/aci/aci_tenant_ep_retention_policy.py | 15 | 10632 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant_ep_retention_policy
short_description: Manage End Point (EP) retention protocol policies (fv:EpRetPol)
description:
- Manage End Point (EP) retention protocol policies on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(fv:EpRetPol) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Swetha Chunduri (@schunduri)
version_added: '2.4'
options:
tenant:
description:
- The name of an existing tenant.
aliases: [ tenant_name ]
epr_policy:
description:
- The name of the end point retention policy.
aliases: [ epr_name, name ]
bounce_age:
description:
- Bounce entry aging interval in seconds.
- Accepted values range between C(150) and C(65535); 0 is used for infinite.
- The APIC defaults to C(630) when unset during creation.
type: int
bounce_trigger:
description:
- Determines if the bounce entries are installed by RARP Flood or COOP Protocol.
- The APIC defaults to C(coop) when unset during creation.
choices: [ coop, flood ]
hold_interval:
description:
- Hold interval in seconds.
- Accepted values range between C(5) and C(65535).
- The APIC defaults to C(300) when unset during creation.
type: int
local_ep_interval:
description:
- Local end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(900) when unset during creation.
type: int
remote_ep_interval:
description:
- Remote end point aging interval in seconds.
- Accepted values range between C(120) and C(65535); 0 is used for infinite.
- The APIC defaults to C(300) when unset during creation.
type: int
move_frequency:
description:
- Move frequency per second.
- Accepted values range between C(0) and C(65535); 0 is used for none.
- The APIC defaults to C(256) when unset during creation.
type: int
description:
description:
- Description for the End point rentention policy.
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new EPR policy
aci_epr_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
bounce_age: 630
hold_interval: 300
local_ep_interval: 900
remote_ep_interval: 300
move_frequency: 256
description: test
state: present
delegate_to: localhost
- name: Remove an EPR policy
aci_epr_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: absent
delegate_to: localhost
- name: Query an EPR policy
aci_epr_policy:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
epr_policy: EPRPol1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPR policies
aci_epr_policy:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
BOUNCE_TRIG_MAPPING = dict(coop='protocol', rarp='rarp-flood')
def main():
    """Ansible entry point: manage fv:EpRetPol objects on an APIC.

    Validates the numeric parameters, translates module values into the
    APIC attribute format (0 becomes 'infinite'/'none' where applicable),
    then creates/updates, deletes or queries the EP retention policy
    depending on ``state``.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        epr_policy=dict(type='str', aliases=['epr_name', 'name']),
        bounce_age=dict(type='int'),
        bounce_trigger=dict(type='str', choices=['coop', 'flood']),
        hold_interval=dict(type='int'),
        local_ep_interval=dict(type='int'),
        remote_ep_interval=dict(type='int'),
        description=dict(type='str', aliases=['descr']),
        move_frequency=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epr_policy', 'tenant']],
            ['state', 'present', ['epr_policy', 'tenant']],
        ],
    )
    epr_policy = module.params['epr_policy']
    # 0 is a sentinel for "infinite" on the interval parameters, so it must
    # bypass the range checks and be translated for the APIC payload.
    bounce_age = module.params['bounce_age']
    if bounce_age is not None and bounce_age != 0 and bounce_age not in range(150, 65536):
        module.fail_json(msg="The bounce_age must be a value of 0 or between 150 and 65535")
    if bounce_age == 0:
        bounce_age = 'infinite'
    bounce_trigger = module.params['bounce_trigger']
    if bounce_trigger is not None:
        # Translate the module choice into the APIC bounceTrig value.
        bounce_trigger = BOUNCE_TRIG_MAPPING[bounce_trigger]
    description = module.params['description']
    hold_interval = module.params['hold_interval']
    if hold_interval is not None and hold_interval not in range(5, 65536):
        module.fail_json(msg="The hold_interval must be a value between 5 and 65535")
    local_ep_interval = module.params['local_ep_interval']
    if local_ep_interval is not None and local_ep_interval != 0 and local_ep_interval not in range(120, 65536):
        module.fail_json(msg="The local_ep_interval must be a value of 0 or between 120 and 65535")
    if local_ep_interval == 0:
        local_ep_interval = "infinite"
    move_frequency = module.params['move_frequency']
    if move_frequency is not None and move_frequency not in range(65536):
        module.fail_json(msg="The move_frequency must be a value between 0 and 65535")
    if move_frequency == 0:
        move_frequency = "none"
    remote_ep_interval = module.params['remote_ep_interval']
    # Fix: 0 means 'infinite' (as documented and as the error message states)
    # and must not be rejected by the range check; mirrors local_ep_interval.
    if remote_ep_interval is not None and remote_ep_interval != 0 and remote_ep_interval not in range(120, 65536):
        module.fail_json(msg="The remote_ep_interval must be a value of 0 or between 120 and 65535")
    if remote_ep_interval == 0:
        remote_ep_interval = "infinite"
    state = module.params['state']
    tenant = module.params['tenant']
    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvEpRetPol',
            aci_rn='epRPol-{0}'.format(epr_policy),
            module_object=epr_policy,
            target_filter={'name': epr_policy},
        ),
    )
    aci.get_existing()
    if state == 'present':
        aci.payload(
            aci_class='fvEpRetPol',
            class_config=dict(
                name=epr_policy,
                descr=description,
                bounceAgeIntvl=bounce_age,
                bounceTrig=bounce_trigger,
                holdIntvl=hold_interval,
                localEpAgeIntvl=local_ep_interval,
                remoteEpAgeIntvl=remote_ep_interval,
                moveFreq=move_frequency,
            ),
        )
        aci.get_diff(aci_class='fvEpRetPol')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
go-bears/nupic | examples/network/core_encoders_demo.py | 4 | 6093 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import json
import os
from datetime import datetime
from pkg_resources import resource_filename
from nupic.data.file_record_stream import FileRecordStream
from nupic.engine import Network
from nupic.encoders import DateEncoder
from nupic.regions.SPRegion import SPRegion
from nupic.regions.TPRegion import TPRegion
def createNetwork():
  """Assemble the demo HTM network: two sensors -> SP -> TM -> anomaly region.

  Wires a C++ ScalarSensor (consumption) and a Python PluggableEncoderSensor
  (timestamp) into a spatial pooler region, a temporal memory region and an
  AnomalyRegion, enables inference on the TM, and returns the Network.
  """
  network = Network()
  #
  # Sensors
  #
  # C++
  consumptionSensor = network.addRegion('consumptionSensor', 'ScalarSensor',
                                        json.dumps({'n': 120,
                                                    'w': 21,
                                                    'minValue': 0.0,
                                                    'maxValue': 100.0,
                                                    'clipInput': True}))
  # Python
  timestampSensor = network.addRegion("timestampSensor",
                                      'py.PluggableEncoderSensor', "")
  timestampSensor.getSelf().encoder = DateEncoder(timeOfDay=(21, 9.5),
                                                  name="timestamp_timeOfDay")
  #
  # Add a SPRegion, a region containing a spatial pooler
  #
  # The SP input width must cover the concatenated output of both encoders.
  consumptionEncoderN = consumptionSensor.getParameter('n')
  timestampEncoderN = timestampSensor.getSelf().encoder.getWidth()
  inputWidth = consumptionEncoderN + timestampEncoderN
  network.addRegion("sp", "py.SPRegion",
                    json.dumps({
                        "spatialImp": "cpp",
                        "globalInhibition": 1,
                        "columnCount": 2048,
                        "inputWidth": inputWidth,
                        "numActiveColumnsPerInhArea": 40,
                        "seed": 1956,
                        "potentialPct": 0.8,
                        "synPermConnected": 0.1,
                        "synPermActiveInc": 0.0001,
                        "synPermInactiveDec": 0.0005,
                        "maxBoost": 1.0,
                    }))
  #
  # Input to the Spatial Pooler
  #
  network.link("consumptionSensor", "sp", "UniformLink", "")
  network.link("timestampSensor", "sp", "UniformLink", "")
  #
  # Add a TPRegion, a region containing a Temporal Memory
  #
  network.addRegion("tm", "py.TPRegion",
                    json.dumps({
                        "columnCount": 2048,
                        "cellsPerColumn": 32,
                        "inputWidth": 2048,
                        "seed": 1960,
                        "temporalImp": "cpp",
                        "newSynapseCount": 20,
                        "maxSynapsesPerSegment": 32,
                        "maxSegmentsPerCell": 128,
                        "initialPerm": 0.21,
                        "permanenceInc": 0.1,
                        "permanenceDec": 0.1,
                        "globalDecay": 0.0,
                        "maxAge": 0,
                        "minThreshold": 9,
                        "activationThreshold": 12,
                        "outputType": "normal",
                        "pamLength": 3,
                    }))
  network.link("sp", "tm", "UniformLink", "")
  network.link("tm", "sp", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn")
  # Add the AnomalyRegion on top of the TPRegion
  network.addRegion("anomalyRegion", "py.AnomalyRegion", json.dumps({}))
  network.link("sp", "anomalyRegion", "UniformLink", "",
               srcOutput="bottomUpOut", destInput="activeColumns")
  network.link("tm", "anomalyRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="predictedColumns")
  # Enable topDownMode to get the predicted columns output
  network.regions['tm'].setParameter("topDownMode", True)
  # Enable inference mode so we get predictions
  network.regions['tm'].setParameter("inferenceMode", True)
  return network
def runNetwork(network):
  """Feed the hotgym CSV through the network, printing an anomaly score per row.

  Note: this file is Python 2 (print statement, csvReader.next()).
  """
  consumptionSensor = network.regions['consumptionSensor']
  timestampSensor = network.regions['timestampSensor']
  anomalyRegion = network.regions['anomalyRegion']
  filename = resource_filename("nupic.datafiles", "extra/hotgym/rec-center-hourly.csv")
  csvReader = csv.reader(open(filename, 'r'))
  # Skip the three header rows of the NuPIC CSV file format.
  csvReader.next()
  csvReader.next()
  csvReader.next()
  for row in csvReader:
    timestampStr, consumptionStr = row
    # For core encoders, use the network API.
    consumptionSensor.setParameter('sensedValue', float(consumptionStr))
    # For Python encoders, circumvent the Network API.
    # The inputs are often crazy Python types, for example:
    t = datetime.strptime(timestampStr, "%m/%d/%y %H:%M")
    timestampSensor.getSelf().setSensedValue(t)
    network.run(1)
    anomalyScore = anomalyRegion.getOutputData('rawAnomalyScore')[0]
    print "Consumption: %s, Anomaly score: %f" % (consumptionStr, anomalyScore)
if __name__ == "__main__":
network = createNetwork()
runNetwork(network)
| agpl-3.0 |
skycucumber/restful | python/venv/lib/python2.7/site-packages/pbr/util.py | 5 | 22286 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""The code in this module is mostly copy/pasted out of the distutils2 source
code, as recommended by Tarek Ziade. As such, it may be subject to some change
as distutils2 development continues, and will have to be kept up to date.
I didn't want to use it directly from distutils2 itself, since I do not want it
to be an installation dependency for our packages yet--it is still too unstable
(the latest version on PyPI doesn't even install).
"""
# These first two imports are not used, but are needed to get around an
# irritating Python bug that can crop up when using ./setup.py test.
# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html
try:
import multiprocessing # flake8: noqa
except ImportError:
pass
import logging # flake8: noqa
import os
import re
import sys
import traceback
from collections import defaultdict
import distutils.ccompiler
from distutils import log
from distutils.errors import (DistutilsOptionError, DistutilsModuleError,
DistutilsFileError)
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools.extension import Extension
try:
import ConfigParser as configparser
except ImportError:
import configparser
from pbr import extra_files
import pbr.hooks
# A simplified RE for this; just checks that the line ends with version
# predicates in ()
_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$')
# Mappings from setup() keyword arguments to setup.cfg options;
# The values are (section, option) tuples, or simply (section,) tuples if
# the option has the same name as the setup() argument
D1_D2_SETUP_ARGS = {
"name": ("metadata",),
"version": ("metadata",),
"author": ("metadata",),
"author_email": ("metadata",),
"maintainer": ("metadata",),
"maintainer_email": ("metadata",),
"url": ("metadata", "home_page"),
"description": ("metadata", "summary"),
"keywords": ("metadata",),
"long_description": ("metadata", "description"),
"download-url": ("metadata",),
"classifiers": ("metadata", "classifier"),
"platforms": ("metadata", "platform"), # **
"license": ("metadata",),
# Use setuptools install_requires, not
# broken distutils requires
"install_requires": ("metadata", "requires_dist"),
"setup_requires": ("metadata", "setup_requires_dist"),
"provides": ("metadata", "provides_dist"), # **
"obsoletes": ("metadata", "obsoletes_dist"), # **
"package_dir": ("files", 'packages_root'),
"packages": ("files",),
"package_data": ("files",),
"namespace_packages": ("files",),
"data_files": ("files",),
"scripts": ("files",),
"py_modules": ("files", "modules"), # **
"cmdclass": ("global", "commands"),
# Not supported in distutils2, but provided for
# backwards compatibility with setuptools
"use_2to3": ("backwards_compat", "use_2to3"),
"zip_safe": ("backwards_compat", "zip_safe"),
"tests_require": ("backwards_compat", "tests_require"),
"dependency_links": ("backwards_compat",),
"include_package_data": ("backwards_compat",),
}
# setup() arguments that can have multiple values in setup.cfg
MULTI_FIELDS = ("classifiers",
"platforms",
"install_requires",
"provides",
"obsoletes",
"namespace_packages",
"packages",
"package_data",
"data_files",
"scripts",
"py_modules",
"dependency_links",
"setup_requires",
"tests_require",
"cmdclass")
# setup() arguments that contain boolean values
BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data")
CSV_FIELDS = ("keywords",)
def resolve_name(name):
    """Resolve a name like ``module.object`` to an object and return it.

    The longest importable prefix of *name* is imported as a module and any
    remaining dotted components are looked up with getattr().

    Raise ImportError if the module or name is not found.
    """
    parts = name.split('.')
    if len(parts) == 1:
        # A bare module name: just import and return the module.  (The
        # original code never assigned ``ret`` on this path and crashed
        # with UnboundLocalError.)
        return __import__(name)
    cursor = len(parts) - 1
    module_name = parts[:cursor]
    attr_name = parts[-1]
    # '' is the fallback when no prefix imports at all; the getattr loop
    # below then raises ImportError(name), as the original did.
    ret = ''
    while cursor > 0:
        try:
            ret = __import__('.'.join(module_name), fromlist=[attr_name])
            break
        except ImportError:
            # Retry with a shorter module prefix.
            cursor -= 1
            module_name = parts[:cursor]
            attr_name = parts[cursor]
    for part in parts[cursor:]:
        try:
            ret = getattr(ret, part)
        except AttributeError:
            raise ImportError(name)
    return ret
def cfg_to_args(path='setup.cfg'):
    """ Distutils2 to distutils1 compatibility util.

    This method uses an existing setup.cfg to generate a dictionary of
    keywords that can be used by distutils.core.setup(**kwargs).

    :param path:
        The setup.cfg path.
    :raises DistutilsFileError:
        When the setup.cfg file is not found.
    """
    # The method source code really starts here.
    parser = configparser.SafeConfigParser()
    if not os.path.exists(path):
        raise DistutilsFileError("file '%s' does not exist" %
                                 os.path.abspath(path))
    parser.read(path)
    # config: section name -> {option: raw string value}
    config = {}
    for section in parser.sections():
        config[section] = dict(parser.items(section))
    # Run setup_hooks, if configured
    setup_hooks = has_get_option(config, 'global', 'setup_hooks')
    package_dir = has_get_option(config, 'files', 'packages_root')
    # Add the source package directory to sys.path in case it contains
    # additional hooks, and to make sure it's on the path before any existing
    # installations of the package
    if package_dir:
        package_dir = os.path.abspath(package_dir)
        sys.path.insert(0, package_dir)
    try:
        if setup_hooks:
            # pbr's own hook is run unconditionally below, so drop it here
            # to avoid running it twice.
            setup_hooks = [
                hook for hook in split_multiline(setup_hooks)
                if hook != 'pbr.hooks.setup_hook']
            for hook in setup_hooks:
                hook_fn = resolve_name(hook)
                try:
                    hook_fn(config)
                except SystemExit:
                    # Include the hook name (the original format string had
                    # no %-argument, logging the literal '%s').
                    log.error('setup hook %s terminated the installation'
                              % hook)
                except Exception:
                    # Deliberately broad: any hook failure aborts the
                    # installation with a logged traceback.
                    e = sys.exc_info()[1]
                    log.error('setup hook %s raised exception: %s\n' %
                              (hook, e))
                    log.error(traceback.format_exc())
                    sys.exit(1)
        # Run the pbr hook
        pbr.hooks.setup_hook(config)
        kwargs = setup_cfg_to_setup_kwargs(config)
        # Set default config overrides
        kwargs['include_package_data'] = True
        kwargs['zip_safe'] = False
        register_custom_compilers(config)
        ext_modules = get_extension_modules(config)
        if ext_modules:
            kwargs['ext_modules'] = ext_modules
        entry_points = get_entry_points(config)
        if entry_points:
            kwargs['entry_points'] = entry_points
        wrap_commands(kwargs)
        # Handle the [files]/extra_files option
        files_extra_files = has_get_option(config, 'files', 'extra_files')
        if files_extra_files:
            extra_files.set_extra_files(split_multiline(files_extra_files))
    finally:
        # Perform cleanup if any paths were added to sys.path
        if package_dir:
            sys.path.pop(0)
    return kwargs
def setup_cfg_to_setup_kwargs(config):
    """Processes the setup.cfg options and converts them to arguments accepted
    by setuptools' setup() function.

    *config* is a dict of dicts (section name -> {option: raw string value})
    as built by cfg_to_args.  Returns the kwargs dict to pass to setup().
    """
    kwargs = {}
    for arg in D1_D2_SETUP_ARGS:
        if len(D1_D2_SETUP_ARGS[arg]) == 2:
            # The distutils field name is different than distutils2's.
            section, option = D1_D2_SETUP_ARGS[arg]
        elif len(D1_D2_SETUP_ARGS[arg]) == 1:
            # The distutils field name is the same as distutils2's.
            section = D1_D2_SETUP_ARGS[arg][0]
            option = arg
        in_cfg_value = has_get_option(config, section, option)
        if not in_cfg_value:
            # There is no such option in the setup.cfg
            if arg == "long_description":
                # Fall back to description_file: read and concatenate the
                # listed files' contents.
                in_cfg_value = has_get_option(config, section,
                                              "description_file")
                if in_cfg_value:
                    in_cfg_value = split_multiline(in_cfg_value)
                    value = ''
                    for filename in in_cfg_value:
                        description_file = open(filename)
                        try:
                            value += description_file.read().strip() + '\n\n'
                        finally:
                            description_file.close()
                    in_cfg_value = value
            else:
                continue
        if arg in CSV_FIELDS:
            in_cfg_value = split_csv(in_cfg_value)
        if arg in MULTI_FIELDS:
            in_cfg_value = split_multiline(in_cfg_value)
        elif arg in BOOL_FIELDS:
            # Provide some flexibility here...
            if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):
                in_cfg_value = True
            else:
                in_cfg_value = False
        if in_cfg_value:
            if arg in ('install_requires', 'tests_require'):
                # Replaces PEP345-style version specs with the sort expected by
                # setuptools, e.g. 'foo (>=1.0)' -> 'foo >=1.0'.
                in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred)
                                for pred in in_cfg_value]
            elif arg == 'package_dir':
                in_cfg_value = {'': in_cfg_value}
            elif arg in ('package_data', 'data_files'):
                # Parse 'key = file file ...' lines with bare continuation
                # lines extending the previous key's file list.
                data_files = {}
                firstline = True
                prev = None
                for line in in_cfg_value:
                    if '=' in line:
                        key, value = line.split('=', 1)
                        key, value = (key.strip(), value.strip())
                        if key in data_files:
                            # Multiple duplicates of the same package name;
                            # this is for backwards compatibility of the old
                            # format prior to d2to1 0.2.6.
                            prev = data_files[key]
                            prev.extend(value.split())
                        else:
                            prev = data_files[key.strip()] = value.split()
                    elif firstline:
                        # A continuation line with no preceding key is invalid.
                        raise DistutilsOptionError(
                            'malformed package_data first line %r (misses '
                            '"=")' % line)
                    else:
                        prev.extend(line.strip().split())
                    firstline = False
                if arg == 'data_files':
                    # the data_files value is a pointlessly different structure
                    # from the package_data value
                    data_files = data_files.items()
                in_cfg_value = data_files
            elif arg == 'cmdclass':
                # Resolve each dotted class name and key it by the command
                # name the class reports.
                cmdclass = {}
                dist = Distribution()
                for cls_name in in_cfg_value:
                    cls = resolve_name(cls_name)
                    cmd = cls(dist)
                    cmdclass[cmd.get_command_name()] = cls
                in_cfg_value = cmdclass
        kwargs[arg] = in_cfg_value
    return kwargs
def register_custom_compilers(config):
    """Handle custom compilers; this has no real equivalent in distutils, where
    additional compilers could only be added programmatically, so we have to
    hack it in somehow.

    Reads the [global] compilers option (a multi-line list of dotted class
    names), resolves each class, and registers it in
    distutils.ccompiler.compiler_class.  Mutates global distutils state and
    sys.modules as a side effect.
    """
    compilers = has_get_option(config, 'global', 'compilers')
    if compilers:
        compilers = split_multiline(compilers)
        for compiler in compilers:
            compiler = resolve_name(compiler)
            # In distutils2 compilers these class attributes exist; for
            # distutils1 we just have to make something up
            if hasattr(compiler, 'name'):
                name = compiler.name
            else:
                name = compiler.__name__
            if hasattr(compiler, 'description'):
                desc = compiler.description
            else:
                desc = 'custom compiler %s' % name
            module_name = compiler.__module__
            # Note; this *will* override built in compilers with the same name
            # TODO: Maybe display a warning about this?
            cc = distutils.ccompiler.compiler_class
            cc[name] = (module_name, compiler.__name__, desc)
            # HACK!!!! Distutils assumes all compiler modules are in the
            # distutils package
            sys.modules['distutils.' + module_name] = sys.modules[module_name]
def get_extension_modules(config):
    """Handle extension modules.

    Scans *config* for sections labelled ``extension: <name>`` (or the
    legacy ``extension = <name>`` spelling) and builds a setuptools
    Extension for each.  Returns the list of Extension objects.
    """
    EXTENSION_FIELDS = ("sources",
                        "include_dirs",
                        "define_macros",
                        "undef_macros",
                        "library_dirs",
                        "libraries",
                        "runtime_library_dirs",
                        "extra_objects",
                        "extra_compile_args",
                        "extra_link_args",
                        "export_symbols",
                        "swig_opts",
                        "depends")
    ext_modules = []
    for section in config:
        if ':' in section:
            labels = section.split(':', 1)
        else:
            # Backwards compatibility for old syntax; don't use this though
            labels = section.split('=', 1)
        labels = [l.strip() for l in labels]
        if (len(labels) == 2) and (labels[0] == 'extension'):
            ext_args = {}
            for field in EXTENSION_FIELDS:
                value = has_get_option(config, section, field)
                # All extension module options besides name can have multiple
                # values
                if not value:
                    continue
                value = split_multiline(value)
                if field == 'define_macros':
                    # Each macro is 'NAME' or 'NAME=value'; distutils wants
                    # (name, value-or-None) tuples.
                    macros = []
                    for macro in value:
                        macro = macro.split('=', 1)
                        if len(macro) == 1:
                            macro = (macro[0].strip(), None)
                        else:
                            macro = (macro[0].strip(), macro[1].strip())
                        macros.append(macro)
                    value = macros
                ext_args[field] = value
            if ext_args:
                if 'name' not in ext_args:
                    # Default the extension name to the section label.
                    ext_args['name'] = labels[1]
                ext_modules.append(Extension(ext_args.pop('name'),
                                             **ext_args))
    return ext_modules
def get_entry_points(config):
    """Process the [entry_points] section of setup.cfg to handle setuptools
    entry points.  This is, of course, not a standard feature of
    distutils2/packaging, but as there is not currently a standard alternative
    in packaging, we provide support for them.

    Returns a dict mapping entry point group name to a list of entry point
    spec strings; empty dict when the section is absent.
    """
    # Idiomatic membership test ('x' not in d) instead of 'not x in d'.
    if 'entry_points' not in config:
        return {}
    # Each option value may hold several newline-separated entry point specs.
    return dict((option, split_multiline(value))
                for option, value in config['entry_points'].items())
def wrap_commands(kwargs):
    """Wrap each distutils command that has pre/post hooks configured with a
    subclass whose run() invokes those hooks (see wrap_command).

    Mutates kwargs['cmdclass'] in place, creating it if necessary.
    """
    dist = Distribution()
    # This should suffice to get the same config values and command classes
    # that the actual Distribution will see (not counting cmdclass, which is
    # handled below)
    dist.parse_config_files()
    for cmd, _ in dist.get_command_list():
        hooks = {}
        for opt, val in dist.get_option_dict(cmd).items():
            # get_option_dict values are (source, value) pairs; keep value.
            val = val[1]
            if opt.startswith('pre_hook.') or opt.startswith('post_hook.'):
                hook_type, alias = opt.split('.', 1)
                hook_dict = hooks.setdefault(hook_type, {})
                hook_dict[alias] = val
        if not hooks:
            continue
        # Prefer a user-supplied command class over the stock one.
        if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']:
            cmdclass = kwargs['cmdclass'][cmd]
        else:
            cmdclass = dist.get_command_class(cmd)
        new_cmdclass = wrap_command(cmd, cmdclass, hooks)
        kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass
def wrap_command(cmd, cmdclass, hooks):
    """Return a dynamically-built subclass of *cmdclass* whose run() is
    bracketed by the configured 'pre_hook' / 'post_hook' callables."""
    def run(self, cmdclass=cmdclass):
        # Bind cmdclass as a default so the closure survives rebinding.
        self.run_command_hooks('pre_hook')
        cmdclass.run(self)
        self.run_command_hooks('post_hook')

    attrs = {
        'run': run,
        'run_command_hooks': run_command_hooks,
        'pre_hook': hooks.get('pre_hook'),
        'post_hook': hooks.get('post_hook'),
    }
    # Mix in object so the result is a new-style class even on Python 2.
    return type(cmd, (cmdclass, object), attrs)
def run_command_hooks(cmd_obj, hook_kind):
    """Run hooks registered for that command and phase.

    *cmd_obj* is a finalized command object; *hook_kind* is either
    'pre_hook' or 'post_hook'.

    String hooks are resolved to callables via resolve_name().  Raises
    DistutilsModuleError for unresolvable hooks and DistutilsOptionError for
    non-callable ones; a hook that raises aborts via sys.exit(1).
    """
    if hook_kind not in ('pre_hook', 'post_hook'):
        raise ValueError('invalid hook kind: %r' % hook_kind)
    hooks = getattr(cmd_obj, hook_kind, None)
    if hooks is None:
        return
    for hook in hooks.values():
        if isinstance(hook, str):
            try:
                hook_obj = resolve_name(hook)
            except ImportError:
                err = sys.exc_info()[1]  # For py3k
                raise DistutilsModuleError('cannot find hook %s: %s' %
                                           (hook, err))
        else:
            hook_obj = hook
        if not hasattr(hook_obj, '__call__'):
            raise DistutilsOptionError('hook %r is not callable' % hook)
        log.info('running %s %s for command %s',
                 hook_kind, hook, cmd_obj.get_command_name())
        try:
            hook_obj(cmd_obj)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt and masked their exit status with exit(1);
            # those now propagate.  Any other hook failure still aborts the
            # installation with a logged traceback.
            e = sys.exc_info()[1]
            log.error('hook %s raised exception: %s\n' % (hook, e))
            log.error(traceback.format_exc())
            sys.exit(1)
def has_get_option(config, section, option):
    """Return the value of *option* in *section* of *config*, trying both the
    underscored and the dashed spelling of the option name; False if the
    section or option is absent."""
    options = config.get(section, {})
    if option in options:
        return options[option]
    dashed = option.replace('_', '-')
    if dashed in options:
        return options[dashed]
    return False
def split_multiline(value):
    """Special behaviour when we have a multi line options: split *value* on
    newlines, strip each line, and drop empty lines."""
    lines = []
    for raw_line in value.split('\n'):
        stripped = raw_line.strip()
        if stripped:
            lines.append(stripped)
    return lines
def split_csv(value):
    """Special behaviour when we have a comma separated options: split *value*
    on commas, strip each chunk, and drop empty chunks."""
    chunks = []
    for raw_chunk in value.split(','):
        stripped = raw_chunk.strip()
        if stripped:
            chunks.append(stripped)
    return chunks
def monkeypatch_method(cls):
    """A function decorator to monkey-patch a method of the same name on the
    given class.

    The original method (if any, and if not patched before) is saved on the
    replacement as ``_orig`` so it can still be called.
    """
    def wrapper(func):
        original = getattr(cls, func.__name__, None)
        # Save the unpatched method only the first time, so re-patching
        # never clobbers the stashed original.
        if original and not hasattr(original, '_orig'):
            func._orig = original
        setattr(cls, func.__name__, func)
        return func
    return wrapper
# The following classes are used to hack Distribution.command_options a bit
class DefaultGetDict(defaultdict):
    """Like defaultdict, but the get() method also sets and returns the default
    value.
    """
    def get(self, key, default=None):
        """Return self[key]; a missing key is first populated with *default*
        (or a freshly built default_factory() value when default is None)."""
        value = self.default_factory() if default is None else default
        return self.setdefault(key, value)
class IgnoreDict(dict):
    """A dictionary that ignores any insertions in which the key is a string
    matching any string in `ignore`. The ignore list can also contain wildcard
    patterns using '*'.
    """
    def __init__(self, ignore):
        # Translate shell-style '*' wildcards into regex '.*' and compile a
        # single alternation that is matched against every inserted key.
        translated = [pattern.replace('*', '.*') for pattern in ignore]
        self.__ignore = re.compile(r'(%s)' % '|'.join(translated))

    def __setitem__(self, key, val):
        # Silently drop ignored keys; store everything else normally.
        if not self.__ignore.match(key):
            super(IgnoreDict, self).__setitem__(key, val)
| gpl-2.0 |
LittleLama/Sick-Beard-BoxCar2 | lib/jsonrpclib/SimpleJSONRPCServer.py | 87 | 8688 | import lib.jsonrpclib
from lib.jsonrpclib import Fault
from lib.jsonrpclib.jsonrpc import USE_UNIX_SOCKETS
import SimpleXMLRPCServer
import SocketServer
import socket
import logging
import os
import types
import traceback
import sys
try:
import fcntl
except ImportError:
# For Windows
fcntl = None
def get_version(request):
    """Return the JSON-RPC version of a request dict.

    2.0 when a 'jsonrpc' member is present, 1.0 when only an 'id' is
    present, None when the request is unrecognizable.
    """
    # must be a dict; membership tests directly on the dict instead of
    # materializing .keys() (which builds a list on Python 2).
    if 'jsonrpc' in request:
        return 2.0
    if 'id' in request:
        return 1.0
    return None
def validate_request(request):
    """Validate a single (non-batch) JSON-RPC request.

    Returns True when *request* is a well-formed request dict; otherwise
    returns a Fault (-32600 Invalid Request) that the caller should
    serialize back to the client.
    """
    if type(request) is not types.DictType:
        fault = Fault(
            -32600, 'Request must be {}, not %s.' % type(request)
        )
        return fault
    rpcid = request.get('id', None)
    version = get_version(request)
    if not version:
        fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid)
        return fault
    # Missing params defaults to an empty positional list (mutates request).
    request.setdefault('params', [])
    method = request.get('method', None)
    params = request.get('params')
    param_types = (types.ListType, types.DictType, types.TupleType)
    # method must be a non-empty string; params must be positional (list/
    # tuple) or named (dict).
    if not method or type(method) not in types.StringTypes or \
            type(params) not in param_types:
        fault = Fault(
            -32600, 'Invalid request parameters or method.', rpcid=rpcid
        )
        return fault
    return True
class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
    """JSON-RPC dispatcher layered on the stock XML-RPC dispatcher: function
    registration/lookup is inherited, while the marshalling layer is replaced
    with jsonrpclib (supports 1.0/2.0, batch requests, and notifications)."""

    def __init__(self, encoding=None):
        # allow_none=True: JSON has a native null, unlike default XML-RPC.
        SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
                                                           allow_none=True,
                                                           encoding=encoding)

    def _marshaled_dispatch(self, data, dispatch_method = None):
        """Parse *data* (a JSON-RPC request string), dispatch it, and return
        the serialized response string ('' or None for notification-only
        requests).

        NOTE(review): *dispatch_method* is accepted for signature
        compatibility with the XML-RPC base class but is never used here.
        NOTE(review): the module is imported as ``lib.jsonrpclib`` at the top
        of the file, yet referenced as ``jsonrpclib`` below -- confirm the
        package aliases itself in sys.modules.
        """
        response = None
        try:
            request = jsonrpclib.loads(data)
        except Exception, e:
            # Unparseable JSON: -32700 parse error per the JSON-RPC spec.
            fault = Fault(-32700, 'Request %s invalid. (%s)' % (data, e))
            response = fault.response()
            return response
        if not request:
            fault = Fault(-32600, 'Request invalid -- no request data.')
            return fault.response()
        if type(request) is types.ListType:
            # This SHOULD be a batch, by spec
            responses = []
            for req_entry in request:
                result = validate_request(req_entry)
                if type(result) is Fault:
                    responses.append(result.response())
                    continue
                resp_entry = self._marshaled_single_dispatch(req_entry)
                # Notifications return None and are omitted from the batch.
                if resp_entry is not None:
                    responses.append(resp_entry)
            if len(responses) > 0:
                response = '[%s]' % ','.join(responses)
            else:
                response = ''
        else:
            result = validate_request(request)
            if type(result) is Fault:
                return result.response()
            response = self._marshaled_single_dispatch(request)
        return response

    def _marshaled_single_dispatch(self, request):
        """Dispatch one validated request dict; return the serialized
        response string, or None when the request is a notification."""
        # TODO - Use the multiprocessing and skip the response if
        # it is a notification
        # Put in support for custom dispatcher here
        # (See SimpleXMLRPCServer._marshaled_dispatch)
        method = request.get('method')
        params = request.get('params')
        try:
            response = self._dispatch(method, params)
        except:
            # Any dispatch failure becomes a -32603 internal error fault.
            exc_type, exc_value, exc_tb = sys.exc_info()
            fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
            return fault.response()
        if 'id' not in request.keys() or request['id'] == None:
            # It's a notification
            return None
        try:
            response = jsonrpclib.dumps(response,
                                        methodresponse=True,
                                        rpcid=request['id']
                                        )
            return response
        except:
            # Serialization failure also maps to -32603.
            exc_type, exc_value, exc_tb = sys.exc_info()
            fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
            return fault.response()

    def _dispatch(self, method, params):
        """Resolve *method* to a callable (registered funcs first, then the
        registered instance) and invoke it with positional (list) or named
        (dict) params.  Returns the result or a Fault."""
        func = None
        try:
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                if hasattr(self.instance, '_dispatch'):
                    # The instance provides its own dispatcher: delegate.
                    return self.instance._dispatch(method, params)
                else:
                    try:
                        func = SimpleXMLRPCServer.resolve_dotted_attribute(
                            self.instance,
                            method,
                            True
                        )
                    except AttributeError:
                        pass
        if func is not None:
            try:
                if type(params) is types.ListType:
                    response = func(*params)
                else:
                    response = func(**params)
                return response
            except TypeError:
                # Signature mismatch: -32602 invalid params.
                return Fault(-32602, 'Invalid parameters.')
            except:
                err_lines = traceback.format_exc().splitlines()
                trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
                fault = jsonrpclib.Fault(-32603, 'Server error: %s' %
                                         trace_string)
                return fault
        else:
            return Fault(-32601, 'Method %s not supported.' % method)
class SimpleJSONRPCRequestHandler(
        SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
    """HTTP request handler that feeds POSTed JSON-RPC payloads to the
    owning server's dispatcher and writes the serialized reply back."""

    def do_POST(self):
        """Handle an HTTP POST: read the body (in bounded chunks), dispatch
        it, and reply 200 with the JSON-RPC response, or 500 with a -32603
        fault on internal failure."""
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Cap each read at 10 MiB so a huge Content-Length cannot force
            # one giant allocation.
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)
            response = self.server._marshaled_dispatch(data)
            self.send_response(200)
        except Exception, e:
            self.send_response(500)
            err_lines = traceback.format_exc().splitlines()
            trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
            fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string)
            response = fault.response()
        if response == None:
            # Notification-only request: reply with an empty body.
            response = ''
        self.send_header("Content-type", "application/json-rpc")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        self.wfile.flush()
        # Half-close the write side to signal end of response.
        self.connection.shutdown(1)
class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher):
    """Standalone JSON-RPC server: a TCPServer (optionally on a Unix domain
    socket) that dispatches requests through SimpleJSONRPCDispatcher."""

    allow_reuse_address = True

    def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
                 logRequests=True, encoding=None, bind_and_activate=True,
                 address_family=socket.AF_INET):
        self.logRequests = logRequests
        SimpleJSONRPCDispatcher.__init__(self, encoding)
        # TCPServer.__init__ has an extra parameter on 2.6+, so
        # check Python version and decide on how to call it
        vi = sys.version_info
        self.address_family = address_family
        if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
            # Unix sockets can't be bound if they already exist in the
            # filesystem. The convention of e.g. X11 is to unlink
            # before binding again.
            if os.path.exists(addr):
                try:
                    os.unlink(addr)
                except OSError:
                    logging.warning("Could not unlink socket %s", addr)
        # if python 2.5 and lower
        if vi[0] < 3 and vi[1] < 6:
            SocketServer.TCPServer.__init__(self, addr, requestHandler)
        else:
            SocketServer.TCPServer.__init__(self, addr, requestHandler,
                                            bind_and_activate)
        # Mark the listening socket close-on-exec so spawned children do not
        # inherit it (no-op on platforms without fcntl, e.g. Windows).
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
    """Dispatcher for CGI scripts: dispatches a request string and writes the
    HTTP headers plus the JSON-RPC response to stdout."""

    def __init__(self, encoding=None):
        SimpleJSONRPCDispatcher.__init__(self, encoding)

    def handle_jsonrpc(self, request_text):
        """Dispatch *request_text* and emit a complete CGI response."""
        response = self._marshaled_dispatch(request_text)
        print 'Content-Type: application/json-rpc'
        print 'Content-Length: %d' % len(response)
        # Blank line terminates the CGI header block.
        print
        sys.stdout.write(response)

    # XML-RPC-style callers invoke handle_xmlrpc; keep it as an alias.
    handle_xmlrpc = handle_jsonrpc
| gpl-3.0 |
passiweinberger/passiweinberger.github.io | presentations/HTM_Intro/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
    """Visual Studio XML user file writer."""

    def __init__(self, user_file_path, version, name):
        """Initializes the user file.

        Args:
          user_file_path: Path to the user file.
          version: Version info.
          name: Name of the user file.
        """
        self.user_file_path = user_file_path
        self.version = version
        self.name = name
        # Maps configuration name -> easy_xml spec list for that config.
        self.configurations = {}

    def AddConfig(self, name):
        """Adds a configuration to the project.

        Args:
          name: Configuration name.
        """
        self.configurations[name] = ['Configuration', {'Name': name}]

    def AddDebugSettings(self, config_name, command, environment = {},
                         working_directory=""):
        """Adds a DebugSettings node to the user file for a particular config.

        NOTE(review): the mutable default for *environment* is safe here
        only because the argument is rebound, never mutated.

        Args:
          config_name: Name of the configuration to attach the settings to
            (created on demand).
          command: command line to run. First element in the list is the
            executable. All elements of the command will be quoted if
            necessary.
          environment: dict of environment variables for the debuggee.
          working_directory: other files which may trigger the rule. (optional)
        """
        command = _QuoteWin32CommandLineArgs(command)
        abs_command = _FindCommandInPath(command[0])
        if environment and isinstance(environment, dict):
            # Serialize the dict into MSVS's 'KEY="value" ...' form.
            env_list = ['%s="%s"' % (key, val)
                        for (key,val) in environment.iteritems()]
            environment = ' '.join(env_list)
        else:
            environment = ''
        n_cmd = ['DebugSettings',
                 {'Command': abs_command,
                  'WorkingDirectory': working_directory,
                  'CommandArguments': " ".join(command[1:]),
                  'RemoteMachine': socket.gethostname(),
                  'Environment': environment,
                  'EnvironmentMerge': 'true',
                  # Currently these are all "dummy" values that we're just
                  # setting in the default manner that MSVS does it. We could
                  # use some of these to add additional capabilities, I
                  # suppose, but they might not have parity with other
                  # platforms then.
                  'Attach': 'false',
                  'DebuggerType': '3',  # 'auto' debugger
                  'Remote': '1',
                  'RemoteCommand': '',
                  'HttpUrl': '',
                  'PDBPath': '',
                  'SQLDebugging': '',
                  'DebuggerFlavor': '0',
                  'MPIRunCommand': '',
                  'MPIRunArguments': '',
                  'MPIRunWorkingDirectory': '',
                  'ApplicationCommand': '',
                  'ApplicationArguments': '',
                  'ShimCommand': '',
                  'MPIAcceptMode': '',
                  'MPIAcceptFilter': ''
                 }]
        # Find the config, and add it if it doesn't exist.
        if config_name not in self.configurations:
            self.AddConfig(config_name)
        # Add the DebugSettings onto the appropriate config.
        self.configurations[config_name].append(n_cmd)

    def WriteIfChanged(self):
        """Writes the user file."""
        configs = ['Configurations']
        for config, spec in sorted(self.configurations.iteritems()):
            configs.append(spec)
        content = ['VisualStudioUserFile',
                   {'Version': self.version.ProjectVersion(),
                    'Name': self.name
                   },
                   configs]
        # Only touches the file on disk when its content actually changed.
        easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                                   encoding="Windows-1252")
| apache-2.0 |
beacloudgenius/edx-platform | lms/djangoapps/courseware/courses.py | 10 | 15401 | from collections import defaultdict
from fs.errors import ResourceNotFoundError
import logging
import inspect
from path import path
from django.http import Http404
from django.conf import settings
from edxmako.shortcuts import render_to_string
from xmodule.modulestore import ModuleStoreEnum
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import ItemNotFoundError
from static_replace import replace_static_urls
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import STUDENT_VIEW
from microsite_configuration import microsite
from courseware.access import has_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module
from student.models import CourseEnrollment
import branding
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
def get_request_for_thread():
    """Walk up the stack, return the nearest first argument named "request"."""
    frame = None
    try:
        # Skip our own frame; inspect callers from nearest to farthest.
        for frame_info in inspect.stack()[1:]:
            frame = frame_info[0]
            varnames = frame.f_code.co_varnames
            if varnames[:1] == ("request",):
                return frame.f_locals["request"]
            if varnames[:2] == ("self", "request",):
                return frame.f_locals["request"]
    finally:
        # Drop the frame reference to break the traceback reference cycle.
        del frame
    # Implicitly returns None when no caller carries a request argument.
def get_course(course_id, depth=0):
    """
    Given a course id, return the corresponding course descriptor.

    If the course does not exist, raises a ValueError. This is appropriate
    for internal use.

    depth: The number of levels of children for the modulestore to cache.
    None means infinite depth. Default is to fetch no children.
    """
    course = modulestore().get_course(course_id, depth=depth)
    if course is not None:
        return course
    raise ValueError(u"Course not found: {0}".format(course_id))
# TODO please rename this function to get_course_by_key at next opportunity!
def get_course_by_id(course_key, depth=0):
    """
    Given a course id, return the corresponding course descriptor.

    If such a course does not exist, raises a 404.

    depth: The number of levels of children for the modulestore to cache. None means infinite depth
    """
    with modulestore().bulk_operations(course_key):
        course = modulestore().get_course(course_key, depth=depth)
        # Guard clause: missing course surfaces as an HTTP 404.
        if course is None:
            raise Http404("Course not found.")
        return course
class UserNotEnrolled(Http404):
    """404 subclass raised (and handled by middleware) when an enrollment
    check fails; carries the course the user is not enrolled in."""
    def __init__(self, course_key):
        super(UserNotEnrolled, self).__init__()
        # Stored so the middleware can redirect based on the course.
        self.course_key = course_key
def get_course_with_access(user, action, course_key, depth=0, check_if_enrolled=False):
    """
    Given a course_key, look up the corresponding course descriptor,
    check that the user has the access to perform the specified action
    on the course, and return the descriptor.

    Raises a 404 if the course_key is invalid, or the user doesn't have access.

    depth: The number of levels of children for the modulestore to cache. None means infinite depth

    check_if_enrolled: when True and the access check fails, raise
    UserNotEnrolled (a 404 subclass caught by middleware) for users who are
    simply not enrolled, instead of a generic 404.
    """
    assert isinstance(course_key, CourseKey)
    course = get_course_by_id(course_key, depth=depth)
    if not has_access(user, action, course, course_key):
        if check_if_enrolled and not CourseEnrollment.is_enrolled(user, course_key):
            # If user is not enrolled, raise UserNotEnrolled exception that will
            # be caught by middleware
            raise UserNotEnrolled(course_key)
        # Deliberately return a non-specific error message to avoid
        # leaking info about access control settings
        raise Http404("Course not found.")
    return course
def get_opt_course_with_access(user, action, course_key):
    """
    Same as get_course_with_access, except that if course_key is None,
    return None without performing any access checks.
    """
    if course_key is not None:
        return get_course_with_access(user, action, course_key)
    return None
def course_image_url(course):
    """Try to look up the image url for the course.  If it's not found,
    log an error and return the dead link

    NOTE(review): despite the docstring, no error is actually logged on
    this path -- the function only returns '' or a computed URL.
    """
    if course.static_asset_path or modulestore().get_modulestore_type(course.id) == ModuleStoreEnum.Type.xml:
        # If we are a static course with the course_image attribute
        # set different than the default, return that path so that
        # courses can use custom course image paths, otherwise just
        # return the default static path.
        url = '/static/' + (course.static_asset_path or getattr(course, 'data_dir', ''))
        if hasattr(course, 'course_image') and course.course_image != course.fields['course_image'].default:
            url += '/' + course.course_image
        else:
            url += '/images/course_image.jpg'
    elif course.course_image == '':
        # if course_image is empty the url will be blank as location
        # of the course_image does not exist
        url = ''
    else:
        # Asset-backed course image: serialize its static-content location.
        loc = StaticContent.compute_location(course.id, course.course_image)
        url = StaticContent.serialize_asset_key_with_slash(loc)
    return url
def find_file(filesystem, dirs, filename):
    """
    Looks for a filename in a list of dirs on a filesystem, in the specified order.

    filesystem: an OSFS filesystem
    dirs: a list of path objects
    filename: a string

    Returns d / filename if found in dir d, else raises ResourceNotFoundError.
    """
    # Try each directory in order; first hit wins.
    candidates = (path(directory) / filename for directory in dirs)
    for candidate in candidates:
        if filesystem.exists(candidate):
            return candidate
    raise ResourceNotFoundError(u"Could not find {0}".format(filename))
def get_course_about_section(course, section_key):
    """
    This returns the snippet of html to be rendered on the course about page,
    given the key for the section.

    Valid keys:
    - overview
    - title
    - university
    - number
    - short_description
    - description
    - key_dates (includes start, end, exams, etc)
    - video
    - course_staff_short
    - course_staff_extended
    - requirements
    - syllabus
    - textbook
    - faq
    - more_info
    - ocw_links

    Raises KeyError for any other key.  Returns None when the about section
    item is missing from the modulestore.
    """
    # Many of these are stored as html files instead of some semantic
    # markup. This can change without effecting this interface when we find a
    # good format for defining so many snippets of text/html.
    # TODO: Remove number, instructors from this list
    if section_key in ['short_description', 'description', 'key_dates', 'video',
                       'course_staff_short', 'course_staff_extended',
                       'requirements', 'syllabus', 'textbook', 'faq', 'more_info',
                       'number', 'instructors', 'overview',
                       'effort', 'end_date', 'prerequisites', 'ocw_links']:
        try:
            # NOTE(review): get_request_for_thread() can return None, in
            # which case request.user below raises AttributeError -- confirm
            # this is only ever called inside a request cycle.
            request = get_request_for_thread()
            loc = course.location.replace(category='about', name=section_key)
            # Use an empty cache
            field_data_cache = FieldDataCache([], course.id, request.user)
            about_module = get_module(
                request.user,
                request,
                loc,
                field_data_cache,
                log_if_not_found=False,
                wrap_xmodule_display=False,
                static_asset_path=course.static_asset_path
            )
            html = ''
            if about_module is not None:
                try:
                    html = about_module.render(STUDENT_VIEW).content
                except Exception:  # pylint: disable=broad-except
                    # Render failures degrade to a generic error snippet.
                    html = render_to_string('courseware/error-message.html', None)
                    log.exception(
                        u"Error rendering course={course}, section_key={section_key}".format(
                            course=course, section_key=section_key
                        ))
            return html
        except ItemNotFoundError:
            log.warning(
                u"Missing about section {key} in course {url}".format(key=section_key, url=course.location.to_deprecated_string())
            )
            return None
    elif section_key == "title":
        return course.display_name_with_default
    elif section_key == "university":
        return course.display_org_with_default
    elif section_key == "number":
        return course.display_number_with_default
    raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section_module(request, course, section_key):
    """
    Return the course info module for ``section_key``.

    Valid keys:
    - handouts
    - guest_handouts
    - updates
    - guest_updates
    """
    # Locate the 'course_info' block for the requested section.
    location = course.id.make_usage_key('course_info', section_key)
    # Use an empty cache: nothing is prefetched for this lookup.
    empty_cache = FieldDataCache([], course.id, request.user)
    return get_module(
        request.user,
        request,
        location,
        empty_cache,
        log_if_not_found=False,
        wrap_xmodule_display=False,
        static_asset_path=course.static_asset_path,
    )
def get_course_info_section(request, course, section_key):
    """
    Return the snippet of html to be rendered on the course info page,
    given the key for the section.

    Valid keys:
    - handouts
    - guest_handouts
    - updates
    - guest_updates
    """
    info_module = get_course_info_section_module(request, course, section_key)
    # A missing section renders as an empty snippet rather than an error.
    if info_module is None:
        return ''
    try:
        return info_module.render(STUDENT_VIEW).content
    except Exception:  # pylint: disable=broad-except
        # Show a friendly error snippet and log the full traceback.
        html = render_to_string('courseware/error-message.html', None)
        log.exception(
            u"Error rendering course={course}, section_key={section_key}".format(
                course=course, section_key=section_key
            ))
        return html
# TODO: Fix this such that these are pulled in as extra course-specific tabs.
# arjun will address this by the end of October if no one does so prior to
# then.
def get_course_syllabus_section(course, section_key):
    """
    Return the snippet of html to be rendered on the syllabus page,
    given the key for the section.

    Valid keys:
    - syllabus
    - guest_syllabus

    Raises:
        KeyError: if ``section_key`` is not one of the valid keys.
    """
    # Many of these are stored as html files instead of some semantic
    # markup. This can change without effecting this interface when we find a
    # good format for defining so many snippets of text/html.
    if section_key in ['syllabus', 'guest_syllabus']:
        try:
            filesys = course.system.resources_fs
            # first look for a run-specific version, then the shared one
            dirs = [path("syllabus") / course.url_name, path("syllabus")]
            filepath = find_file(filesys, dirs, section_key + ".html")
            with filesys.open(filepath) as html_file:
                return replace_static_urls(
                    html_file.read().decode('utf-8'),
                    getattr(course, 'data_dir', None),
                    course_id=course.id,
                    static_asset_path=course.static_asset_path,
                )
        except ResourceNotFoundError:
            log.exception(
                u"Missing syllabus section {key} in course {url}".format(key=section_key, url=course.location.to_deprecated_string())
            )
            # Placeholder text shown in place of the missing section.
            return "! Syllabus missing !"
    # Fixed: previous message said "Invalid about key", copy-pasted from the
    # about-section accessor.
    raise KeyError("Invalid syllabus key " + str(section_key))
def get_courses_by_university(user, domain=None):
    '''
    Returns dict of lists of courses available, keyed by course.org (ie university).
    Courses are sorted by course.number.
    '''
    # TODO: Clean up how 'error' is done.
    # get_courses() already filters out courses that errored.
    by_org = defaultdict(list)
    for course in get_courses(user, domain):
        by_org[course.org].append(course)
    return by_org
def get_courses(user, domain=None):
    '''
    Returns a list of courses available, sorted by course.number
    '''
    candidates = branding.get_visible_courses()
    # Microsites may override the catalog-visibility permission name.
    permission = microsite.get_value(
        'COURSE_CATALOG_VISIBILITY_PERMISSION',
        settings.COURSE_CATALOG_VISIBILITY_PERMISSION
    )
    accessible = [course for course in candidates
                  if has_access(user, permission, course)]
    return sorted(accessible, key=lambda course: course.number)
def sort_by_announcement(courses):
    """
    Sorts a list of courses by their announcement date. If the date is
    not available, sort them by their start date.
    """
    # ``sorting_score`` encodes how far the course is from its start day;
    # smaller scores sort first.
    return sorted(courses, key=lambda course: course.sorting_score)
def sort_by_start_date(courses):
    """
    Returns a list of courses sorted by their start date, latest first.
    """
    # NOTE(review): despite the summary above, this key orders start dates
    # ascending (non-ended courses first, dated before undated within each
    # group) -- confirm which ordering callers actually rely on.
    def _ordering(course):
        return (course.has_ended(), course.start is None, course.start)

    return sorted(courses, key=_ordering)
def get_cms_course_link(course, page='course'):
    """
    Returns a link to course_index for editing the course in cms,
    assuming that the course is actually cms-backed.
    """
    # This is fragile, but unfortunately the problem is that within the LMS we
    # can't use the reverse calls from the CMS, so the protocol-relative URL
    # is assembled by hand.
    course_key = unicode(course.id)
    return u"//{base}/{page}/{key}".format(
        base=settings.CMS_BASE, page=page, key=course_key)
def get_cms_block_link(block, page):
    """
    Returns a link to block_index for editing the course in cms,
    assuming that the block is actually cms-backed.
    """
    # This is fragile, but unfortunately the problem is that within the LMS we
    # can't use the reverse calls from the CMS, so the protocol-relative URL
    # is assembled by hand.
    return u"//{base}/{page}/{loc}".format(
        base=settings.CMS_BASE, page=page, loc=block.location)
def get_studio_url(course, page):
    """
    Get the Studio URL of the page that is passed in.

    Args:
        course (CourseDescriptor)
        page: the Studio page name to link to.

    Returns:
        The Studio URL string, or None when the course is not Studio-edited
        or is XML-backed.
    """
    backed_by_studio = course.course_edit_method == "Studio"
    stored_in_mongo = (
        modulestore().get_modulestore_type(course.id) != ModuleStoreEnum.Type.xml
    )
    if backed_by_studio and stored_in_mongo:
        return get_cms_course_link(course, page)
    return None
def get_problems_in_section(section):
    """
    Return a dict of the problems in a section, keyed by problem location
    (as a string) with the problem descriptor as the value.
    """
    # Normalize the input: accept either a UsageKey or its string form.
    if isinstance(section, UsageKey):
        section_key = section
    else:
        section_key = UsageKey.from_string(section)
    # depth=3 prefetches section -> subsection -> vertical -> component in
    # one Mongo round trip instead of querying per node.
    section_descriptor = modulestore().get_item(section_key, depth=3)
    problem_descriptors = defaultdict()  # no default factory: acts as a plain dict
    # iterate over section, sub-section, vertical
    for subsection in section_descriptor.get_children():
        for vertical in subsection.get_children():
            for component in vertical.get_children():
                is_problem = component.location.category == 'problem'
                if is_problem and getattr(component, 'has_score', False):
                    problem_descriptors[unicode(component.location)] = component
    return problem_descriptors
| agpl-3.0 |
NunoEdgarGub1/scikit-learn | sklearn/utils/sparsetools/tests/test_traversal.py | 315 | 2001 | from __future__ import division, print_function, absolute_import
from nose import SkipTest
import numpy as np
from numpy.testing import assert_array_almost_equal
try:
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
except ImportError:
# Oldish versions of scipy don't have that
csgraph_from_dense = None
def test_graph_breadth_first():
    """breadth_first_tree from node 0 should match the expected BFS tree."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)
    expected = np.array([[0, 1, 2, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 7, 0],
                         [0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0]])
    # The graph is symmetric, so the BFS tree is the same whether it is
    # treated as directed or undirected.
    for directed in (True, False):
        tree = breadth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
def test_graph_depth_first():
    """depth_first_tree from node 0 should match the expected DFS tree."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)
    expected = np.array([[0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 0, 0],
                         [0, 0, 7, 0, 0],
                         [0, 0, 0, 1, 0]])
    # Symmetric graph: directed and undirected traversals yield the same tree.
    for directed in (True, False):
        tree = depth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
| bsd-3-clause |
wemanuel/smry | server-auth/ee/image.py | 7 | 14057 | #!/usr/bin/env python
"""A representation of an Earth Engine image.
See: https://sites.google.com/site/earthengineapidocs for more details.
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import json
import apifunction
import computedobject
import data
import deprecation
import ee_exception
import ee_types
import element
import function
import geometry
class Image(element.Element):
    """An object to represent an Earth Engine image."""

    # Tracks whether the server-side API functions have been imported onto
    # this class; see initialize()/reset().
    _initialized = False

    def __init__(self, args=None, version=None):
        """Constructs an Earth Engine image.

        Args:
          args: This constructor accepts a variety of arguments:
              - A string - an EarthEngine asset id,
              - A string and a number - an EarthEngine asset id and version,
              - A number - creates a constant image,
              - An EEArray - creates a constant array image,
              - A list - creates an image out of each element of the array and
                combines them into a single image,
              - An ee.Image - returns the argument,
              - Nothing - results in an empty transparent image.
          version: An optional asset version.

        Raises:
          EEException: if passed something other than the above.
        """
        self.initialize()
        if version is not None:
            if ee_types.isString(args) and ee_types.isNumber(version):
                # An ID and version.
                super(Image, self).__init__(
                    apifunction.ApiFunction.lookup('Image.load'),
                    {'id': args, 'version': version})
            else:
                raise ee_exception.EEException(
                    'If version is specified, the arg to Image() must be a string. '
                    'Received: %s' % (args,))
            return
        if ee_types.isNumber(args):
            # A constant image.
            super(Image, self).__init__(
                apifunction.ApiFunction.lookup('Image.constant'), {'value': args})
        elif ee_types.isString(args):
            # An ID.
            super(Image, self).__init__(
                apifunction.ApiFunction.lookup('Image.load'), {'id': args})
        elif isinstance(args, (list, tuple)):
            # Make an image out of each element.
            image = Image.combine_([Image(i) for i in args])
            super(Image, self).__init__(image.func, image.args)
        elif isinstance(args, computedobject.ComputedObject):
            if args.name() == 'Array':
                # A constant array image.
                super(Image, self).__init__(
                    apifunction.ApiFunction.lookup('Image.constant'), {'value': args})
            else:
                # A custom object to reinterpret as an Image.
                super(Image, self).__init__(args.func, args.args, args.varName)
        elif args is None:
            # No arguments: an empty, fully-masked (transparent) image.
            super(Image, self).__init__(
                apifunction.ApiFunction.lookup('Image.mask'),
                {'image': Image(0), 'mask': Image(0)})
        else:
            raise ee_exception.EEException(
                'Unrecognized argument type to convert to an Image: %s' % args)

    @classmethod
    def initialize(cls):
        """Imports API functions to this class."""
        if not cls._initialized:
            apifunction.ApiFunction.importApi(cls, 'Image', 'Image')
            # Window functions are attached with a 'focal_' prefix.
            apifunction.ApiFunction.importApi(cls, 'Window', 'Image', 'focal_')
            cls._initialized = True

    @classmethod
    def reset(cls):
        """Removes imported API functions from this class."""
        apifunction.ApiFunction.clearApi(cls)
        cls._initialized = False

    def getInfo(self):
        """Fetch and return information about this image.

        Returns:
          The return contents vary but will include at least:
              bands - Array containing metadata about the bands in the image,
              properties - Dictionary containing the image's metadata properties.
        """
        return super(Image, self).getInfo()

    def getMapId(self, vis_params=None):
        """Fetch and return a map id and token, suitable for use in a Map overlay.

        Args:
          vis_params: The visualization parameters. See ee.data.getMapId.

        Returns:
          An object containing a mapid and access token, or an error message.
        """
        # Copy so the caller's dict is not mutated when 'image' is added.
        request = (vis_params or {}).copy()
        request['image'] = self.serialize()
        response = data.getMapId(request)
        response['image'] = self
        return response

    def getDownloadURL(self, params=None):
        """Get a download URL for this image.

        Args:
          params: An object containing visualization options with the following
              possible values:
              name - a base name to use when constructing filenames.
              bands - a description of the bands to download. Must be an array of
                  dictionaries, each with the following keys:
                  id - the name of the band, a string, required.
                  crs - an optional CRS string defining the band projection.
                  crs_transform - an optional array of 6 numbers specifying an affine
                      transform from the specified CRS, in the order: xScale, yShearing,
                      xShearing, yScale, xTranslation and yTranslation.
                  dimensions - an optional array of two integers defining the width and
                      height to which the band is cropped.
                  scale - an optional number, specifying the scale in meters of the
                      band; ignored if crs and crs_transform is specified.
              crs - a default CRS string to use for any bands that do not explicitly
                  specify one.
              crs_transform - a default affine transform to use for any bands that do
                  not specify one, of the same format as the crs_transform of bands.
              dimensions - default image cropping dimensions to use for any bands
                  that do not specify them.
              scale - a default scale to use for any bands that do not specify one;
                  ignored if crs and crs_transform is specified.
              region - a polygon specifying a region to download; ignored if crs
                  and crs_transform is specified.

        Returns:
          A URL to download the specified image.
        """
        request = params or {}
        request['image'] = self.serialize()
        return data.makeDownloadUrl(data.getDownloadId(request))

    def getThumbURL(self, params=None):
        """Get a thumbnail URL for this image.

        Args:
          params: Parameters identical to getMapId, plus, optionally:
              dimensions - (a number or pair of numbers in format WIDTHxHEIGHT) Max
                  dimensions of the thumbnail to render, in pixels. If only one number
                  is passed, it is used as the maximum, and the other dimension is
                  computed by proportional scaling.
              region - (E,S,W,N or GeoJSON) Geospatial region of the image
                  to render. By default, the whole image.
              format - (string) Either 'png' or 'jpg'.

        Returns:
          A URL to download a thumbnail the specified image.

        Raises:
          EEException: If the region parameter is not an array or GeoJSON object.
        """
        request = params or {}
        request['image'] = self.serialize()
        # NOTE: dict.has_key is Python 2 only.
        if request.has_key('region'):
            # A region may arrive as GeoJSON (dict) or a coordinate list;
            # either way it is serialized to a JSON string for the server.
            if (isinstance(request['region'], dict) or
                    isinstance(request['region'], list)):
                request['region'] = json.dumps(request['region'])
            elif not isinstance(request['region'], str):
                raise ee_exception.EEException(
                    'The region parameter must be an array or a GeoJSON object.')
        return data.makeThumbUrl(data.getThumbId(request))

    # Deprecated spellings to match the JS library.
    getDownloadUrl = deprecation.Deprecated('Use getDownloadURL().')(
        getDownloadURL)
    getThumbUrl = deprecation.Deprecated('Use getThumbURL().')(getThumbURL)

    ###################################################
    # Static methods.
    ###################################################

    @staticmethod
    def rgb(r, g, b):
        """Create a 3-band image.

        This creates a 3-band image specifically for visualization using
        the first band in each image.

        Args:
          r: The red image.
          g: The green image.
          b: The blue image.

        Returns:
          The combined image.
        """
        return Image.combine_([r, g, b], ['vis-red', 'vis-green', 'vis-blue'])

    @staticmethod
    def cat(*args):
        """Concatenate the given images together into a single image."""
        return Image.combine_(args)

    @staticmethod
    def combine_(images, names=None):
        """Combine all the bands from the given images into a single image.

        Args:
          images: The images to be combined.
          names: An array of names for the output bands.

        Returns:
          The combined image.
        """
        if not images:
            raise ee_exception.EEException('Can\'t combine 0 images.')
        # Append all the bands.
        result = Image(images[0])
        for image in images[1:]:
            result = apifunction.ApiFunction.call_('Image.addBands', result, image)
        # Optionally, rename the bands of the result.
        if names:
            result = result.select(['.*'], names)
        return result

    def select(self, opt_selectors=None, opt_names=None, *args):
        """Selects bands from an image.

        Can be called in one of two ways:
          - Passed any number of non-list arguments. All of these will be
            interpreted as band selectors. These can be band names, regexes, or
            numeric indices. E.g.
            selected = image.select('a', 'b', 3, 'd');
          - Passed two lists. The first will be used as band selectors and the
            second as new names for the selected bands. The number of new names
            must match the number of selected bands. E.g.
            selected = image.select(['a', 4], ['newA', 'newB']);

        Args:
          opt_selectors: An array of names, regexes or numeric indices specifying
              the bands to select.
          opt_names: An array of strings specifying the new names for the
              selected bands.
          *args: Selector elements as varargs.

        Returns:
          An image with the selected bands.
        """
        # Fold the two explicit arguments back into the varargs list so all
        # calling conventions are handled uniformly below.
        if opt_selectors is not None:
            args = list(args)
            if opt_names is not None:
                args.insert(0, opt_names)
            args.insert(0, opt_selectors)
        algorithm_args = {
            'input': self,
            'bandSelectors': args[0] if args else [],
        }
        if args:
            # If the user didn't pass an array as the first argument, assume
            # that everything in the arguments array is actually a selector.
            if (len(args) > 2 or
                    ee_types.isString(args[0]) or
                    ee_types.isNumber(args[0])):
                # Varargs inputs.
                selectors = args
                # Verify we didn't get anything unexpected.
                for selector in selectors:
                    if (not ee_types.isString(selector) and
                            not ee_types.isNumber(selector) and
                            not isinstance(selector, computedobject.ComputedObject)):
                        raise ee_exception.EEException(
                            'Illegal argument to select(): ' + selector)
                algorithm_args['bandSelectors'] = selectors
            elif len(args) > 1:
                algorithm_args['newNames'] = args[1]
        return apifunction.ApiFunction.apply_('Image.select', algorithm_args)

    def expression(self, expression, opt_map=None):
        """Evaluates an arithmetic expression on an image or images.

        The bands of the primary input image are available using the built-in
        function b(), as b(0) or b('band_name').

        Variables in the expression are interpreted as additional image parameters
        which must be supplied in opt_map. The bands of each such image can be
        accessed like image.band_name or image[0].

        Both b() and image[] allow multiple arguments, to specify multiple bands,
        such as b(1, 'name', 3). Calling b() with no arguments, or using a variable
        by itself, returns all bands of the image.

        Args:
          expression: The expression to evaluate.
          opt_map: An optional map of input images available by name.

        Returns:
          The image computed by the provided expression.
        """
        arg_name = 'DEFAULT_EXPRESSION_IMAGE'
        all_vars = [arg_name]
        args = {arg_name: self}
        # Add custom arguments, promoting them to Images manually.
        # NOTE: dict.iteritems is Python 2 only.
        if opt_map:
            for name, value in opt_map.iteritems():
                all_vars.append(name)
                args[name] = Image(value)
        body = apifunction.ApiFunction.call_(
            'Image.parseExpression', expression, arg_name, all_vars)

        # Reinterpret the body call as an ee.Function by hand-generating the
        # signature so the computed function knows its input and output types.
        class ReinterpretedFunction(function.Function):

            def encode(self, encoder):
                return body.encode(encoder)

            def getSignature(self):
                return {
                    'name': '',
                    'args': [{'name': name, 'type': 'Image', 'optional': False}
                             for name in all_vars],
                    'returns': 'Image'
                }

        # Perform the call.
        return ReinterpretedFunction().apply(args)

    def clip(self, clip_geometry):
        """Clips an image by a Geometry, Feature or FeatureCollection.

        Args:
          clip_geometry: The Geometry, Feature or FeatureCollection to clip to.

        Returns:
          The clipped image.
        """
        try:
            # Need to manually promote GeoJSON, because the signature does not
            # specify the type so auto promotion won't work.
            clip_geometry = geometry.Geometry(clip_geometry)
        except ee_exception.EEException:
            pass  # Not an ee.Geometry or GeoJSON. Just pass it along.
        return apifunction.ApiFunction.call_('Image.clip', self, clip_geometry)

    def rename(self, names, *args):
        """Rename the bands of an image.

        Can be called with either a list of strings or any number of strings.

        Args:
          names: An array of strings specifying the new names for the
              bands. Must exactly match the number of bands in the image.
          *args: Band names as varargs.

        Returns:
          An image with the renamed bands.
        """
        if args:
            # Handle varargs; everything else we let the server handle.
            args = list(args)
            args.insert(0, names)
            names = args
        algorithm_args = {
            'input': self,
            'names': names
        }
        return apifunction.ApiFunction.apply_('Image.rename', algorithm_args)

    @staticmethod
    def name():
        return 'Image'
| apache-2.0 |
csuttles/utils | python/todo-api/flask/lib/python2.7/site-packages/pip/compat/__init__.py | 248 | 3402 | """Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import sys
from pip._vendor.six import text_type
try:
from logging.config import dictConfig as logging_dictConfig
except ImportError:
from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
import ipaddress
except ImportError:
try:
from pip._vendor import ipaddress
except ImportError:
import ipaddr as ipaddress
ipaddress.ip_address = ipaddress.IPAddress
ipaddress.ip_network = ipaddress.IPNetwork
__all__ = [
"logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
"native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS",
]
# Python 3.4+ exposes the bytecode-cache path helper in importlib; older
# interpreters fall back to the deprecated `imp` module, which only has
# cache_from_source when the interpreter writes __pycache__ (PEP 3147).
if sys.version_info >= (3, 4):
    uses_pycache = True
    from importlib.util import cache_from_source
else:
    import imp
    uses_pycache = hasattr(imp, 'cache_from_source')
    if uses_pycache:
        cache_from_source = imp.cache_from_source
    else:
        # No __pycache__ support on this interpreter.
        cache_from_source = None
# Text/bytes helpers whose implementation differs between Python 2 and 3.
if sys.version_info >= (3,):
    def console_to_str(s):
        # Decode console (e.g. subprocess) output using the stdout encoding,
        # falling back to UTF-8 for bytes that fail to decode.
        try:
            return s.decode(sys.__stdout__.encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')

    def native_str(s, replace=False):
        # The "native" string type on Python 3 is unicode `str`.
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s
else:
    def console_to_str(s):
        # On Python 2 console output is already a (byte) str.
        return s

    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the duration of ``td`` in seconds.

    Compatibility shim: timedelta.total_seconds() only exists on
    Python >= 2.7, so older interpreters get the manual computation.
    """
    try:
        return td.total_seconds()
    except AttributeError:
        microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
        # True division (enabled by the module's __future__ import).
        return microseconds / 10 ** 6
def get_path_uid(path):
    """
    Return path's uid.

    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003

    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.

    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        try:
            file_uid = os.fstat(fd).st_uid
        finally:
            # Fixed: always release the descriptor, even if fstat raises;
            # previously the fd leaked on error.
            os.close(fd)
    else:  # AIX and Jython
        # WARNING: time of check vulnerabity, but best we can do w/o NOFOLLOW
        if not os.path.islink(path):
            # older versions of Jython don't have `os.fstat`
            file_uid = os.stat(path).st_uid
        else:
            # raise OSError for parity with os.O_NOFOLLOW above
            raise OSError(
                "%s is a symlink; Will not return uid for symlinks" % path
            )
    return file_uid
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ['python', 'wsgiref']
if sys.version_info >= (2, 7):
    # argparse joined the stdlib in 2.7; before that it was a real package.
    stdlib_pkgs.extend(['argparse'])

# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
           (sys.platform == 'cli' and os.name == 'nt'))
| apache-2.0 |
mianos/micropython | drivers/nrf24l01/nrf24l01.py | 37 | 7138 | """NRF24L01 driver for Micro Python
"""
import pyb
# nRF24L01+ registers (`const` is MicroPython's compile-time constant
# declaration; values are the register addresses used by reg_read/reg_write)
CONFIG = const(0x00)
EN_RXADDR = const(0x02)
SETUP_AW = const(0x03)
SETUP_RETR = const(0x04)
RF_CH = const(0x05)
RF_SETUP = const(0x06)
STATUS = const(0x07)
RX_ADDR_P0 = const(0x0a)
TX_ADDR = const(0x10)
RX_PW_P0 = const(0x11)
FIFO_STATUS = const(0x17)
DYNPD = const(0x1c)

# CONFIG register bit flags
EN_CRC = const(0x08)  # enable CRC
CRCO = const(0x04)  # CRC encoding scheme; 0=1 byte, 1=2 bytes
PWR_UP = const(0x02)  # 1=power up, 0=power down
PRIM_RX = const(0x01)  # RX/TX control; 0=PTX, 1=PRX

# RF_SETUP register: output power and air data rate
POWER_0 = const(0x00)  # -18 dBm
POWER_1 = const(0x02)  # -12 dBm
POWER_2 = const(0x04)  # -6 dBm
POWER_3 = const(0x06)  # 0 dBm
SPEED_1M = const(0x00)
SPEED_2M = const(0x08)
SPEED_250K = const(0x20)

# STATUS register bit flags
RX_DR = const(0x40)  # RX data ready; write 1 to clear
TX_DS = const(0x20)  # TX data sent; write 1 to clear
MAX_RT = const(0x10)  # max retransmits reached; write 1 to clear

# FIFO_STATUS register
RX_EMPTY = const(0x01)  # 1 if RX FIFO is empty

# constants for SPI instructions
R_RX_PL_WID = const(0x60)  # read RX payload width
R_RX_PAYLOAD = const(0x61)  # read RX payload
W_TX_PAYLOAD = const(0xa0)  # write TX payload
FLUSH_TX = const(0xe1)  # flush TX FIFO
FLUSH_RX = const(0xe2)  # flush RX FIFO
NOP = const(0xff)  # use to read STATUS register
class NRF24L01:
    """Driver for the nRF24L01(+) 2.4 GHz radio transceiver over SPI."""

    def __init__(self, spi, cs, ce, channel=46, payload_size=16):
        # the chip supports fixed payloads of at most 32 bytes
        assert payload_size <= 32
        # init the SPI bus and pins
        spi.init(spi.MASTER, baudrate=4000000, polarity=0, phase=0, firstbit=spi.MSB)
        cs.init(cs.OUT_PP, cs.PULL_NONE)
        ce.init(ce.OUT_PP, ce.PULL_NONE)
        # store the pins
        self.spi = spi
        self.cs = cs
        self.ce = ce
        # reset everything
        self.ce.low()
        self.cs.high()
        self.payload_size = payload_size
        self.pipe0_read_addr = None
        pyb.delay(5)
        # set address width to 5 bytes and check for device present
        self.reg_write(SETUP_AW, 0b11)
        if self.reg_read(SETUP_AW) != 0b11:
            raise OSError("nRF24L01+ Hardware not responding")
        # disable dynamic payloads
        self.reg_write(DYNPD, 0)
        # auto retransmit delay: 1750us
        # auto retransmit count: 8
        self.reg_write(SETUP_RETR, (6 << 4) | 8)
        # set rf power and speed
        self.set_power_speed(POWER_3, SPEED_250K) # Best for point to point links
        # init CRC
        self.set_crc(2)
        # clear status flags
        self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
        # set channel
        self.set_channel(channel)
        # flush buffers
        self.flush_rx()
        self.flush_tx()

    def reg_read(self, reg):
        # Read one register byte: clock out the address, then read one byte
        # while chip-select is held low.
        self.cs.low()
        self.spi.send_recv(reg)
        buf = self.spi.recv(1)
        self.cs.high()
        return buf[0]

    def reg_write(self, reg, buf):
        # Write a register (0x20 | reg is the W_REGISTER command); the first
        # byte clocked back is the STATUS register, which is returned.
        self.cs.low()
        status = self.spi.send_recv(0x20 | reg)[0]
        self.spi.send(buf)
        self.cs.high()
        return status

    def flush_rx(self):
        # Discard anything pending in the RX FIFO.
        self.cs.low()
        self.spi.send(FLUSH_RX)
        self.cs.high()

    def flush_tx(self):
        # Discard anything pending in the TX FIFO.
        self.cs.low()
        self.spi.send(FLUSH_TX)
        self.cs.high()

    # power is one of POWER_x defines; speed is one of SPEED_x defines
    def set_power_speed(self, power, speed):
        # Mask out the power/speed bits, then set the requested values.
        setup = self.reg_read(RF_SETUP) & 0b11010001
        self.reg_write(RF_SETUP, setup | power | speed)

    # length in bytes: 0, 1 or 2
    def set_crc(self, length):
        config = self.reg_read(CONFIG) & ~(CRCO | EN_CRC)
        if length == 0:
            pass
        elif length == 1:
            config |= EN_CRC
        else:
            config |= EN_CRC | CRCO
        self.reg_write(CONFIG, config)

    def set_channel(self, channel):
        # Channel is clamped to the chip's valid 0-125 range.
        self.reg_write(RF_CH, min(channel, 125))

    # address should be a bytes object 5 bytes long
    def open_tx_pipe(self, address):
        assert len(address) == 5
        # RX pipe 0 must match the TX address so auto-ack replies are heard.
        self.reg_write(RX_ADDR_P0, address)
        self.reg_write(TX_ADDR, address)
        self.reg_write(RX_PW_P0, self.payload_size)

    # address should be a bytes object 5 bytes long
    # pipe 0 and 1 have 5 byte address
    # pipes 2-5 use same 4 most-significant bytes as pipe 1, plus 1 extra byte
    def open_rx_pipe(self, pipe_id, address):
        assert len(address) == 5
        assert 0 <= pipe_id <= 5
        if pipe_id == 0:
            # remember pipe-0 address so start_listening() can restore it
            self.pipe0_read_addr = address
        if pipe_id < 2:
            self.reg_write(RX_ADDR_P0 + pipe_id, address)
        else:
            self.reg_write(RX_ADDR_P0 + pipe_id, address[0])
        self.reg_write(RX_PW_P0 + pipe_id, self.payload_size)
        self.reg_write(EN_RXADDR, self.reg_read(EN_RXADDR) | (1 << pipe_id))

    def start_listening(self):
        # Power up in primary-receiver mode and clear stale status flags.
        self.reg_write(CONFIG, self.reg_read(CONFIG) | PWR_UP | PRIM_RX)
        self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
        if self.pipe0_read_addr is not None:
            self.reg_write(RX_ADDR_P0, self.pipe0_read_addr)
        self.flush_rx()
        self.flush_tx()
        self.ce.high()
        pyb.udelay(130)

    def stop_listening(self):
        self.ce.low()
        self.flush_tx()
        self.flush_rx()

    # returns True if any data available to recv
    def any(self):
        return not bool(self.reg_read(FIFO_STATUS) & RX_EMPTY)

    def recv(self):
        # get the data
        self.cs.low()
        self.spi.send(R_RX_PAYLOAD)
        buf = self.spi.recv(self.payload_size)
        self.cs.high()
        # clear RX ready flag
        self.reg_write(STATUS, RX_DR)
        return buf

    # blocking wait for tx complete
    def send(self, buf, timeout=500):
        # NOTE(review): send_start() has no return value, so the local below
        # is always None and unused -- confirm whether it can be dropped.
        send_nonblock = self.send_start(buf)
        start = pyb.millis()
        result = None
        while result is None and pyb.elapsed_millis(start) < timeout:
            result = self.send_done() # 1 == success, 2 == fail
        if result == 2:
            raise OSError("send failed")

    # non-blocking tx
    def send_start(self, buf):
        # power up
        self.reg_write(CONFIG, (self.reg_read(CONFIG) | PWR_UP) & ~PRIM_RX)
        pyb.udelay(150)
        # send the data
        self.cs.low()
        self.spi.send(W_TX_PAYLOAD)
        self.spi.send(buf)
        if len(buf) < self.payload_size:
            self.spi.send(b'\x00' * (self.payload_size - len(buf))) # pad out data
        self.cs.high()
        # enable the chip so it can send the data
        self.ce.high()
        pyb.udelay(15) # needs to be >10us
        self.ce.low()

    # returns None if send still in progress, 1 for success, 2 for fail
    def send_done(self):
        if not (self.reg_read(STATUS) & (TX_DS | MAX_RT)):
            return None # tx not finished
        # either finished or failed: get and clear status flags, power down
        status = self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
        self.reg_write(CONFIG, self.reg_read(CONFIG) & ~PWR_UP)
        return 1 if status & TX_DS else 2
| mit |
cubicova17/annet | venv/lib/python2.7/site-packages/psycopg2/tests/test_module.py | 8 | 5957 | #!/usr/bin/env python
# test_module.py - unit test for the module interface
#
# Copyright (C) 2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
from testutils import unittest, skip_before_python
from testconfig import dsn
import psycopg2
class ConnectTestCase(unittest.TestCase):
def setUp(self):
self.args = None
def conect_stub(dsn, connection_factory=None, async=False):
self.args = (dsn, connection_factory, async)
self._connect_orig = psycopg2._connect
psycopg2._connect = conect_stub
def tearDown(self):
psycopg2._connect = self._connect_orig
def test_there_has_to_be_something(self):
self.assertRaises(psycopg2.InterfaceError, psycopg2.connect)
self.assertRaises(psycopg2.InterfaceError, psycopg2.connect,
connection_factory=lambda dsn, async=False: None)
self.assertRaises(psycopg2.InterfaceError, psycopg2.connect,
async=True)
def test_no_keywords(self):
psycopg2.connect('')
self.assertEqual(self.args[0], '')
self.assertEqual(self.args[1], None)
self.assertEqual(self.args[2], False)
def test_dsn(self):
psycopg2.connect('dbname=blah x=y')
self.assertEqual(self.args[0], 'dbname=blah x=y')
self.assertEqual(self.args[1], None)
self.assertEqual(self.args[2], False)
def test_supported_keywords(self):
psycopg2.connect(database='foo')
self.assertEqual(self.args[0], 'dbname=foo')
psycopg2.connect(user='postgres')
self.assertEqual(self.args[0], 'user=postgres')
psycopg2.connect(password='secret')
self.assertEqual(self.args[0], 'password=secret')
psycopg2.connect(port=5432)
self.assertEqual(self.args[0], 'port=5432')
psycopg2.connect(sslmode='require')
self.assertEqual(self.args[0], 'sslmode=require')
psycopg2.connect(database='foo',
user='postgres', password='secret', port=5432)
self.assert_('dbname=foo' in self.args[0])
self.assert_('user=postgres' in self.args[0])
self.assert_('password=secret' in self.args[0])
self.assert_('port=5432' in self.args[0])
self.assertEqual(len(self.args[0].split()), 4)
def test_generic_keywords(self):
psycopg2.connect(foo='bar')
self.assertEqual(self.args[0], 'foo=bar')
def test_factory(self):
def f(dsn, async=False):
pass
psycopg2.connect(database='foo', bar='baz', connection_factory=f)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], f)
self.assertEqual(self.args[2], False)
psycopg2.connect("dbname=foo bar=baz", connection_factory=f)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], f)
self.assertEqual(self.args[2], False)
    def test_async(self):
        # Any truthy async value is forwarded; args[2] holds the flag.
        psycopg2.connect(database='foo', bar='baz', async=1)
        self.assertEqual(self.args[0], 'dbname=foo bar=baz')
        self.assertEqual(self.args[1], None)
        self.assert_(self.args[2])
        psycopg2.connect("dbname=foo bar=baz", async=True)
        self.assertEqual(self.args[0], 'dbname=foo bar=baz')
        self.assertEqual(self.args[1], None)
        self.assert_(self.args[2])
    def test_empty_param(self):
        # An empty string value must be quoted, not dropped, in the dsn.
        psycopg2.connect(database='sony', password='')
        self.assertEqual(self.args[0], "dbname=sony password=''")
    def test_escape(self):
        # Values with spaces are single-quoted...
        psycopg2.connect(database='hello world')
        self.assertEqual(self.args[0], "dbname='hello world'")
        # ...backslashes and quotes are backslash-escaped...
        psycopg2.connect(database=r'back\slash')
        self.assertEqual(self.args[0], r"dbname=back\\slash")
        psycopg2.connect(database="quo'te")
        self.assertEqual(self.args[0], r"dbname=quo\'te")
        # ...other whitespace (tab) also forces quoting...
        psycopg2.connect(database="with\ttab")
        self.assertEqual(self.args[0], "dbname='with\ttab'")
        # ...and the rules compose when a value needs both.
        psycopg2.connect(database=r"\every thing'")
        self.assertEqual(self.args[0], r"dbname='\\every thing\''")
class ExceptionsTestCase(unittest.TestCase):
    """Checks for the attributes psycopg2 attaches to database errors.

    Requires a live connection (module-level ``dsn``); the tests trigger a
    genuine server-side error by selecting from a nonexistent table.
    """
    def setUp(self):
        self.conn = psycopg2.connect(dsn)
    def tearDown(self):
        self.conn.close()
    def test_attributes(self):
        cur = self.conn.cursor()
        try:
            cur.execute("select * from nonexist")
        except psycopg2.Error, exc:
            e = exc
        # 42P01 is the PostgreSQL code used here for the failed statement;
        # pgerror carries the server message, and the originating cursor
        # is kept on the exception.
        self.assertEqual(e.pgcode, '42P01')
        self.assert_(e.pgerror)
        self.assert_(e.cursor is cur)
    @skip_before_python(2, 5)
    def test_pickle(self):
        import pickle
        cur = self.conn.cursor()
        try:
            cur.execute("select * from nonexist")
        except psycopg2.Error, exc:
            e = exc
        e1 = pickle.loads(pickle.dumps(e))
        self.assertEqual(e.pgerror, e1.pgerror)
        self.assertEqual(e.pgcode, e1.pgcode)
        # The cursor reference does not survive the pickle round-trip.
        self.assert_(e1.cursor is None)
def test_suite():
    """Build this module's test suite (entry point for the test runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| mit |
mozilla-releng/scriptworker | src/scriptworker/cot/generate.py | 2 | 4025 | #!/usr/bin/env python
"""Chain of Trust artifact generation.
Attributes:
log (logging.Logger): the log object for this module.
"""
import logging
import os
from scriptworker.client import validate_json_schema
from scriptworker.ed25519 import ed25519_private_key_from_file
from scriptworker.exceptions import ScriptWorkerException
from scriptworker.utils import filepaths_in_dir, format_json, get_hash, load_json_or_yaml, write_to_file
log = logging.getLogger(__name__)
# get_cot_artifacts {{{1
def get_cot_artifacts(context):
    """Generate the artifact relative paths and shas for the chain of trust.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict: a dictionary of {"path/to/artifact": {"hash_alg": "..."}, ...}

    """
    artifact_dir = context.config["artifact_dir"]
    algorithm = context.config["chain_of_trust_hash_algorithm"]
    # Sort so the artifact dict (and thus the serialized cot) is deterministic.
    return {
        relpath: {algorithm: get_hash(os.path.join(artifact_dir, relpath), hash_alg=algorithm)}
        for relpath in sorted(filepaths_in_dir(artifact_dir))
    }
# get_cot_environment {{{1
def get_cot_environment(context):
    """Get environment information for the chain of trust artifact.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict: the environment info.

    """
    # TODO: collect real host/environment details; currently always empty.
    return {}
# generate_cot_body {{{1
def generate_cot_body(context):
    """Generate the chain of trust dictionary.

    This is the unsigned and unformatted chain of trust artifact contents.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict: the unsigned and unformatted chain of trust artifact contents.

    Raises:
        ScriptWorkerException: on error.

    """
    claim = context.claim_task
    try:
        return {
            "artifacts": get_cot_artifacts(context),
            "chainOfTrustVersion": 1,
            "runId": claim["runId"],
            "task": context.task,
            "taskId": claim["status"]["taskId"],
            "workerGroup": claim["workerGroup"],
            "workerId": context.config["worker_id"],
            "workerType": context.config["worker_type"],
            "environment": get_cot_environment(context),
        }
    except KeyError as exc:
        # A missing claim/config key means we can't produce a trustworthy cot.
        raise ScriptWorkerException("Can't generate chain of trust! {}".format(str(exc)))
# generate_cot {{{1
def generate_cot(context, parent_path=None):
    """Format and sign the cot body, and write to disk.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        parent_path (str, optional): The directory to write the chain of trust
            artifacts to.  If None, this is ``artifact_dir/public/``.
            Defaults to None.

    Returns:
        str: the contents of the chain of trust artifact.

    Raises:
        ScriptWorkerException: on schema error.

    """
    cot = generate_cot_body(context)
    schema_path = context.config["cot_schema_path"]
    schema = load_json_or_yaml(
        schema_path,
        is_path=True,
        exception=ScriptWorkerException,
        message="Can't read schema file {}: %(exc)s".format(schema_path),
    )
    validate_json_schema(cot, schema, name="chain of trust")
    serialized = format_json(cot)
    if not parent_path:
        parent_path = os.path.join(context.config["artifact_dir"], "public")
    unsigned_path = os.path.join(parent_path, "chain-of-trust.json")
    write_to_file(unsigned_path, serialized)
    if context.config["sign_chain_of_trust"]:
        # Write a detached ed25519 signature next to the unsigned artifact.
        signing_key = ed25519_private_key_from_file(context.config["ed25519_private_key_path"])
        signature = signing_key.sign(serialized.encode("utf-8"))
        write_to_file("{}.sig".format(unsigned_path), signature, file_type="binary")
    return serialized
| mpl-2.0 |
yewang15215/django | django/contrib/admin/options.py | 4 | 82223 | from __future__ import unicode_literals
import copy
import json
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, construct_change_message, flatten_fieldsets,
get_deleted_objects, lookup_needs_distinct, model_format_dict, quote,
unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html
from django.utils.http import urlencode, urlquote
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, format_lazy, get_text_list
from django.utils.translation import ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
# Querystring flags used by the admin popup/raw-id machinery.
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
# Layout constants used by ``radio_fields``.
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
    """Return the ContentType for ``obj``, keeping proxy models distinct."""
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level.
    from django.contrib.contenttypes.models import ContentType
    return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
    """Return the <ul> CSS class matching a radio_fields style constant."""
    if radio_style == VERTICAL:
        return 'radiolist'
    return 'radiolist inline'
class IncorrectLookupParameters(Exception):
    """Raised when the changelist receives invalid querystring filter parameters."""
    pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
# Method-decorator form of csrf_protect, applied to the admin views below.
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
    """Functionality common to both ModelAdmin and InlineAdmin."""
    # Declarative options; subclasses override these class attributes.
    raw_id_fields = ()
    fields = None
    exclude = None
    fieldsets = None
    form = forms.ModelForm
    filter_vertical = ()
    filter_horizontal = ()
    radio_fields = {}
    prepopulated_fields = {}
    formfield_overrides = {}
    readonly_fields = ()
    ordering = None
    view_on_site = True
    show_full_result_count = True
    checks_class = BaseModelAdminChecks
    def check(self, **kwargs):
        """Run the system checks implemented by ``checks_class``."""
        return self.checks_class().check(self, **kwargs)
    def __init__(self):
        # Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
        # rather than simply overwriting.
        overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
        for k, v in self.formfield_overrides.items():
            overrides.setdefault(k, {}).update(v)
        self.formfield_overrides = overrides
    def formfield_for_dbfield(self, db_field, request, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.
        If kwargs are given, they're passed to the form Field's constructor.
        """
        # If the field specifies choices, we don't need to look for special
        # admin widgets - we just need to use a select widget of some kind.
        if db_field.choices:
            return self.formfield_for_choice_field(db_field, request, **kwargs)
        # ForeignKey or ManyToManyFields
        if isinstance(db_field, models.ManyToManyField) or isinstance(db_field, models.ForeignKey):
            # Combine the field kwargs with any options for formfield_overrides.
            # Make sure the passed in **kwargs override anything in
            # formfield_overrides because **kwargs is more specific, and should
            # always win.
            if db_field.__class__ in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
            # Get the correct formfield.
            if isinstance(db_field, models.ForeignKey):
                formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
            elif isinstance(db_field, models.ManyToManyField):
                formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
            # For non-raw_id fields, wrap the widget with a wrapper that adds
            # extra HTML -- the "add other" interface -- to the end of the
            # rendered output. formfield can be None if it came from a
            # OneToOneField with parent_link=True or a M2M intermediary.
            if formfield and db_field.name not in self.raw_id_fields:
                related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
                wrapper_kwargs = {}
                if related_modeladmin:
                    # Let the related admin's permissions decide which of the
                    # add/change/delete shortcut icons are shown.
                    wrapper_kwargs.update(
                        can_add_related=related_modeladmin.has_add_permission(request),
                        can_change_related=related_modeladmin.has_change_permission(request),
                        can_delete_related=related_modeladmin.has_delete_permission(request),
                    )
                formfield.widget = widgets.RelatedFieldWidgetWrapper(
                    formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
                )
            return formfield
        # If we've got overrides for the formfield defined, use 'em. **kwargs
        # passed to formfield_for_dbfield override the defaults.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
                return db_field.formfield(**kwargs)
        # For any other type of field, just call its formfield() method.
        return db_field.formfield(**kwargs)
    def formfield_for_choice_field(self, db_field, request, **kwargs):
        """
        Get a form Field for a database Field that has declared choices.
        """
        # If the field is named as a radio_field, use a RadioSelect
        if db_field.name in self.radio_fields:
            # Avoid stomping on custom widget/choices arguments.
            if 'widget' not in kwargs:
                kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                    'class': get_ul_class(self.radio_fields[db_field.name]),
                })
            if 'choices' not in kwargs:
                kwargs['choices'] = db_field.get_choices(
                    include_blank=db_field.blank,
                    blank_choice=[('', _('None'))]
                )
        return db_field.formfield(**kwargs)
    def get_field_queryset(self, db, db_field, request):
        """
        If the ModelAdmin specifies ordering, the queryset should respect that
        ordering.  Otherwise don't specify the queryset, let the field decide
        (returns None in that case).
        """
        related_admin = self.admin_site._registry.get(db_field.remote_field.model)
        if related_admin is not None:
            ordering = related_admin.get_ordering(request)
            if ordering is not None and ordering != ():
                return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
        return None
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """
        Get a form Field for a ForeignKey.
        """
        db = kwargs.get('using')
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
        elif db_field.name in self.radio_fields:
            kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                'class': get_ul_class(self.radio_fields[db_field.name]),
            })
            kwargs['empty_label'] = _('None') if db_field.blank else None
        if 'queryset' not in kwargs:
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset
        return db_field.formfield(**kwargs)
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """
        Get a form Field for a ManyToManyField.
        """
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if not db_field.remote_field.through._meta.auto_created:
            return None
        db = kwargs.get('using')
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
        elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
            kwargs['widget'] = widgets.FilteredSelectMultiple(
                db_field.verbose_name,
                db_field.name in self.filter_vertical
            )
        if 'queryset' not in kwargs:
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset
        form_field = db_field.formfield(**kwargs)
        if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
            # Help users who may not realize a plain multi-select needs
            # modifier keys to select several values.
            msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
            help_text = form_field.help_text
            form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg
        return form_field
    def get_view_on_site_url(self, obj=None):
        """Return the "View on site" URL for ``obj``, or None when unavailable."""
        if obj is None or not self.view_on_site:
            return None
        if callable(self.view_on_site):
            return self.view_on_site(obj)
        elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
            # use the ContentType lookup if view_on_site is True
            return reverse('admin:view_on_site', kwargs={
                'content_type_id': get_content_type_for_model(obj).pk,
                'object_id': obj.pk
            })
        # Implicitly returns None when the object has no get_absolute_url().
    def get_empty_value_display(self):
        """
        Return the empty_value_display set on ModelAdmin or AdminSite.
        """
        try:
            return mark_safe(self.empty_value_display)
        except AttributeError:
            # Fall back to the site-wide default.
            return mark_safe(self.admin_site.empty_value_display)
    def get_exclude(self, request, obj=None):
        """
        Hook for specifying exclude.
        """
        return self.exclude
    def get_fields(self, request, obj=None):
        """
        Hook for specifying fields.
        """
        return self.fields
    def get_fieldsets(self, request, obj=None):
        """
        Hook for specifying fieldsets.
        """
        if self.fieldsets:
            return self.fieldsets
        return [(None, {'fields': self.get_fields(request, obj)})]
    def get_ordering(self, request):
        """
        Hook for specifying field ordering.
        """
        return self.ordering or ()  # otherwise we might try to *None, which is bad ;)
    def get_readonly_fields(self, request, obj=None):
        """
        Hook for specifying custom readonly fields.
        """
        return self.readonly_fields
    def get_prepopulated_fields(self, request, obj=None):
        """
        Hook for specifying custom prepopulated fields.
        """
        return self.prepopulated_fields
    def get_queryset(self, request):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site. This is used by changelist_view.
        """
        qs = self.model._default_manager.get_queryset()
        # TODO: this should be handled by some parameter to the ChangeList.
        ordering = self.get_ordering(request)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs
    def lookup_allowed(self, lookup, value):
        """Return True if filtering the changelist on ``lookup=value`` is permitted."""
        from django.contrib.admin.filters import SimpleListFilter
        model = self.model
        # Check FKey lookups that are allowed, so that popups produced by
        # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
        # are allowed to work.
        for fk_lookup in model._meta.related_fkey_lookups:
            # As ``limit_choices_to`` can be a callable, invoke it here.
            if callable(fk_lookup):
                fk_lookup = fk_lookup()
            for k, v in widgets.url_params_from_lookup_dict(fk_lookup).items():
                if k == lookup and v == value:
                    return True
        relation_parts = []
        prev_field = None
        for part in lookup.split(LOOKUP_SEP):
            try:
                field = model._meta.get_field(part)
            except FieldDoesNotExist:
                # Lookups on non-existent fields are ok, since they're ignored
                # later.
                break
            # It is allowed to filter on values that would be found from local
            # model anyways. For example, if you filter on employee__department__id,
            # then the id value would be found already from employee__department_id.
            if not prev_field or (prev_field.concrete and
                    field not in prev_field.get_path_info()[-1].target_fields):
                relation_parts.append(part)
            if not getattr(field, 'get_path_info', None):
                # This is not a relational field, so further parts
                # must be transforms.
                break
            prev_field = field
            model = field.get_path_info()[-1].to_opts.model
        if len(relation_parts) <= 1:
            # Either a local field filter, or no fields at all.
            return True
        clean_lookup = LOOKUP_SEP.join(relation_parts)
        valid_lookups = [self.date_hierarchy]
        for filter_item in self.list_filter:
            if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
                valid_lookups.append(filter_item.parameter_name)
            elif isinstance(filter_item, (list, tuple)):
                valid_lookups.append(filter_item[0])
            else:
                valid_lookups.append(filter_item)
        return clean_lookup in valid_lookups
    def to_field_allowed(self, request, to_field):
        """
        Returns True if the model associated with this admin should be
        allowed to be referenced by the specified field.
        """
        opts = self.model._meta
        try:
            field = opts.get_field(to_field)
        except FieldDoesNotExist:
            return False
        # Always allow referencing the primary key since it's already possible
        # to get this information from the change view URL.
        if field.primary_key:
            return True
        # Allow reverse relationships to models defining m2m fields if they
        # target the specified field.
        for many_to_many in opts.many_to_many:
            if many_to_many.m2m_target_field_name() == to_field:
                return True
        # Make sure at least one of the models registered for this site
        # references this field through a FK or a M2M relationship.
        registered_models = set()
        for model, admin in self.admin_site._registry.items():
            registered_models.add(model)
            for inline in admin.inlines:
                registered_models.add(inline.model)
        related_objects = (
            f for f in opts.get_fields(include_hidden=True)
            if (f.auto_created and not f.concrete)
        )
        for related_object in related_objects:
            related_model = related_object.related_model
            remote_field = related_object.field.remote_field
            if (any(issubclass(model, related_model) for model in registered_models) and
                    hasattr(remote_field, 'get_related_field') and
                    remote_field.get_related_field() == field):
                return True
        return False
    def has_add_permission(self, request):
        """
        Returns True if the given request has permission to add an object.
        Can be overridden by the user in subclasses.
        """
        opts = self.opts
        codename = get_permission_codename('add', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_change_permission(self, request, obj=None):
        """
        Returns True if the given request has permission to change the given
        Django model instance, the default implementation doesn't examine the
        `obj` parameter.
        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to change the `obj`
        model instance. If `obj` is None, this should return True if the given
        request has permission to change *any* object of the given type.
        """
        opts = self.opts
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_delete_permission(self, request, obj=None):
        """
        Returns True if the given request has permission to change the given
        Django model instance, the default implementation doesn't examine the
        `obj` parameter.
        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to delete the `obj`
        model instance. If `obj` is None, this should return True if the given
        request has permission to delete *any* object of the given type.
        """
        opts = self.opts
        codename = get_permission_codename('delete', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_module_permission(self, request):
        """
        Returns True if the given request has any permission in the given
        app label.
        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to view the module on
        the admin index page and access the module's index page. Overriding it
        does not restrict access to the add, change or delete views. Use
        `ModelAdmin.has_(add|change|delete)_permission` for that.
        """
        return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
    "Encapsulates all admin options and functionality for a given model."
    # Changelist presentation options.
    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    list_max_show_all = 200
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    # Save-button behaviour on the change form.
    save_as = False
    save_as_continue = True
    save_on_top = False
    paginator = Paginator
    preserve_filters = True
    inlines = []
    # Custom templates (designed to be over-ridden in subclasses)
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None
    # Actions
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True
    checks_class = ModelAdminChecks
    def __init__(self, model, admin_site):
        # ``opts`` caches the model's _meta for the many places that use it.
        self.model = model
        self.opts = model._meta
        self.admin_site = admin_site
        super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
    def get_urls(self):
        """Return the URL patterns for this ModelAdmin's views."""
        from django.conf.urls import url
        def wrap(view):
            # Route the view through the admin site's permission-checking
            # wrapper, tagging it with this ModelAdmin for introspection.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            wrapper.model_admin = self
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.model_name
        urlpatterns = [
            url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
            url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
            url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
            url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
            url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
            # For backwards compatibility (was the change url before 1.9)
            # NOTE: this catch-all must stay last or it would shadow the more
            # specific patterns above.
            url(r'^(.+)/$', wrap(RedirectView.as_view(
                pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
            ))),
        ]
        return urlpatterns
    @property
    def urls(self):
        """URLconf-includable list of patterns; see get_urls()."""
        return self.get_urls()
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'admin/RelatedObjectLookups.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp%s.js' % extra,
]
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_form(self, request, obj=None, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        excluded = self.get_exclude(request, obj)
        exclude = [] if excluded is None else list(excluded)
        readonly_fields = self.get_readonly_fields(request, obj)
        exclude.extend(readonly_fields)
        if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistent with the
        # default on modelform_factory
        exclude = exclude or None
        # Remove declared form fields which are in readonly_fields.
        # Shadowing a declared field with None on a subclass removes it.
        new_attrs = OrderedDict(
            (f, None) for f in readonly_fields
            if f in self.form.declared_fields
        )
        form = type(self.form.__name__, (self.form,), new_attrs)
        defaults = {
            "form": form,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            # Re-raise with a hint pointing at the admin class configuration.
            raise FieldError(
                '%s. Check fields/fieldsets/exclude attributes of class %s.'
                % (e, self.__class__.__name__)
            )
    def get_changelist(self, request, **kwargs):
        """
        Returns the ChangeList class for use on the changelist page.
        """
        # Imported lazily to avoid circular imports at module load time.
        from django.contrib.admin.views.main import ChangeList
        return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
Returns an instance matching the field and value provided, the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(
self.model, self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults
)
    def get_formsets_with_inlines(self, request, obj=None):
        """
        Yields formsets and the corresponding inlines.
        """
        # Generator so the (potentially expensive) formset classes are built lazily.
        for inline in self.get_inline_instances(request, obj):
            yield inline.get_formset(request, obj), inline
    def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
        """Instantiate the paginator class configured in ``self.paginator``."""
        return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
    def log_addition(self, request, object, message):
        """
        Log that an object has been successfully added.
        The default implementation creates an admin LogEntry object.
        """
        # Imported lazily; admin models can't be imported at module load time.
        from django.contrib.admin.models import LogEntry, ADDITION
        return LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=ADDITION,
            change_message=message,
        )
    def log_change(self, request, object, message):
        """
        Log that an object has been successfully changed.
        The default implementation creates an admin LogEntry object.
        """
        # Imported lazily; admin models can't be imported at module load time.
        from django.contrib.admin.models import LogEntry, CHANGE
        return LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=CHANGE,
            change_message=message,
        )
    def log_deletion(self, request, object, object_repr):
        """
        Log that an object will be deleted. Note that this method must be
        called before the deletion.
        The default implementation creates an admin LogEntry object.
        """
        # Imported lazily; admin models can't be imported at module load time.
        from django.contrib.admin.models import LogEntry, DELETION
        return LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=object_repr,
            action_flag=DELETION,
        )
    def action_checkbox(self, obj):
        """
        A list_display column containing a checkbox widget.
        """
        return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
    # The column header renders as a "select all" toggle instead of text.
    action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
    def get_actions(self, request):
        """
        Return a dictionary mapping the names of all actions for this
        ModelAdmin to a tuple of (callable, name, description) for each action.
        """
        # If self.actions is explicitly set to None that means that we don't
        # want *any* actions enabled on this page.
        if self.actions is None or IS_POPUP_VAR in request.GET:
            return OrderedDict()
        actions = []
        # Gather actions from the admin site first
        for (name, func) in self.admin_site.actions:
            description = getattr(func, 'short_description', name.replace('_', ' '))
            actions.append((func, name, description))
        # Then gather them from the model admin and all parent classes,
        # starting with self and working back up.
        for klass in self.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            # Avoid trying to iterate over None
            if not class_actions:
                continue
            actions.extend(self.get_action(action) for action in class_actions)
        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)
        # Convert the actions into an OrderedDict keyed by name.
        # Later (more specific) entries overwrite earlier ones with the same name.
        actions = OrderedDict(
            (name, (func, name, desc))
            for func, name, desc in actions
        )
        return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
    """
    Return a given action from a parameter, which can either be a callable,
    or the name of a method on the ModelAdmin. Return is a tuple of
    (callable, name, description), or None if the named action is not
    registered on the admin site.
    """
    # If the action is a callable, just use it.
    if callable(action):
        func = action
        action = action.__name__
    # Next, look for a method. Grab it off self.__class__ to get an unbound
    # method instead of a bound one; this ensures that the calling
    # conventions are the same for functions and methods.
    elif hasattr(self.__class__, action):
        func = getattr(self.__class__, action)
    # Finally, look for a named method on the admin site
    else:
        try:
            func = self.admin_site.get_action(action)
        except KeyError:
            # Unknown action name: signal "no such action" to the caller.
            return None
    # Prefer an explicit short_description; otherwise derive a readable
    # label from the action name ("delete_selected" -> "Delete selected").
    if hasattr(func, 'short_description'):
        description = func.short_description
    else:
        description = capfirst(action.replace('_', ' '))
    return func, action, description
def get_list_display(self, request):
    """
    Hook returning the sequence of fields shown as columns on the
    changelist page; the default simply hands back ``self.list_display``.
    Subclasses may override this to vary the columns per request.
    """
    return self.list_display
def get_list_display_links(self, request, list_display):
    """
    Return the sequence of fields rendered as links on the changelist.

    ``list_display`` is the sequence returned by get_list_display(). An
    explicitly configured (truthy) or explicitly disabled (None) value of
    ``list_display_links`` is honoured as-is, as is the degenerate case of
    no display columns; otherwise only the first column becomes a link.
    """
    links = self.list_display_links
    if links or links is None or not list_display:
        return links
    # Default behaviour: link only the first changelist column.
    return list(list_display)[:1]
def get_list_filter(self, request):
    """
    Hook returning the fields offered as filters in the changelist's
    right sidebar; the default hands back the ``list_filter`` option.
    """
    return self.list_filter
def get_list_select_related(self, request):
    """
    Hook returning the value used for the select_related() portion of the
    changelist query; the default is the ``list_select_related`` option.
    """
    return self.list_select_related
def get_search_fields(self, request):
    """
    Hook returning the fields searched when a search query is submitted;
    the default hands back the ``search_fields`` option.
    """
    return self.search_fields
def get_search_results(self, request, queryset, search_term):
    """
    Returns a tuple containing a queryset to implement the search,
    and a boolean indicating if the results may contain duplicates.
    """
    # Apply keyword searches.
    def construct_search(field_name):
        # Leading sigil selects the lookup type:
        #   '^' -> istartswith, '=' -> iexact, '@' -> search,
        #   no prefix -> icontains.
        if field_name.startswith('^'):
            return "%s__istartswith" % field_name[1:]
        elif field_name.startswith('='):
            return "%s__iexact" % field_name[1:]
        elif field_name.startswith('@'):
            return "%s__search" % field_name[1:]
        else:
            return "%s__icontains" % field_name
    use_distinct = False
    search_fields = self.get_search_fields(request)
    if search_fields and search_term:
        orm_lookups = [construct_search(str(search_field))
                       for search_field in search_fields]
        # Each whitespace-separated word must match at least one field
        # (words are ANDed together, fields are ORed per word).
        for bit in search_term.split():
            or_queries = [models.Q(**{orm_lookup: bit})
                          for orm_lookup in orm_lookups]
            queryset = queryset.filter(reduce(operator.or_, or_queries))
        # Flag when any lookup spans a many-valued relation, so the
        # caller knows it may need .distinct() to avoid duplicate rows.
        if not use_distinct:
            for search_spec in orm_lookups:
                if lookup_needs_distinct(self.opts, search_spec):
                    use_distinct = True
                    break
    return queryset, use_distinct
def get_preserved_filters(self, request):
    """
    Returns the preserved filters querystring.

    Produces a ``_changelist_filters=...`` urlencoded fragment used to
    carry the changelist's current filters through add/change/delete
    views, or '' when filter preservation is disabled or unavailable.
    """
    match = request.resolver_match
    if self.preserve_filters and match:
        opts = self.model._meta
        current_url = '%s:%s' % (match.app_name, match.url_name)
        changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
        if current_url == changelist_url:
            # On the changelist itself: snapshot the live querystring.
            preserved_filters = request.GET.urlencode()
        else:
            # On other admin pages: re-use the filters that were passed
            # along via the _changelist_filters GET parameter, if any.
            preserved_filters = request.GET.get('_changelist_filters')
        if preserved_filters:
            return urlencode({'_changelist_filters': preserved_filters})
    return ''
def construct_change_message(self, request, form, formsets, add=False):
    """
    Construct a JSON structure describing changes from a changed object.

    Delegates to the module-level construct_change_message() helper;
    ``request`` is accepted so subclasses can use it, but the default
    implementation ignores it.
    """
    return construct_change_message(form, formsets, add)
def message_user(self, request, message, level=messages.INFO, extra_tags='',
                 fail_silently=False):
    """
    Send a message to the user via the django.contrib.messages backend.

    Mirrors messages.add_message() but takes its positional arguments in
    a different order for backwards compatibility, and additionally
    accepts ``level`` as a case-insensitive string (e.g. 'warning') as
    well as the usual integer constant.
    """
    if not isinstance(level, int):
        # Translate a level name into its numeric constant.
        try:
            level = getattr(messages.constants, level.upper())
        except AttributeError:
            valid = ', '.join(
                '`%s`' % tag for tag in messages.constants.DEFAULT_TAGS.values())
            raise ValueError(
                'Bad message level string: `%s`. Possible values are: %s'
                % (level, valid)
            )
    messages.add_message(request, level, message, extra_tags=extra_tags,
                         fail_silently=fail_silently)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
    """
    Persist an inline *formset* to the database.

    Hook point for customising inline saving; ``form`` is the parent
    ModelForm and ``change`` tells add vs. edit, though the default
    implementation only calls formset.save().
    """
    formset.save()
def save_related(self, request, form, formsets, change):
    """
    Save the objects related to the parent instance.

    Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list
    of inline formsets and the add/change flag, first saves the parent
    form's many-to-many data, then each inline formset in order. By this
    point save_form() and save_model() have already been called.
    """
    form.save_m2m()
    for inline_formset in formsets:
        self.save_formset(request, form, inline_formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
    """
    Render the add/change form page.

    Augments *context* with permission flags, URLs and admin options,
    then renders the add-form template (if configured and adding) or the
    change-form template, falling back from the model-specific template
    to the app-level and generic admin templates.
    """
    opts = self.model._meta
    app_label = opts.app_label
    # Thread the preserved changelist filters into the form action URL.
    preserved_filters = self.get_preserved_filters(request)
    form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
    view_on_site_url = self.get_view_on_site_url(obj)
    context.update({
        'add': add,
        'change': change,
        'has_add_permission': self.has_add_permission(request),
        'has_change_permission': self.has_change_permission(request, obj),
        'has_delete_permission': self.has_delete_permission(request, obj),
        'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField,
        'has_absolute_url': view_on_site_url is not None,
        'absolute_url': view_on_site_url,
        'form_url': form_url,
        'opts': opts,
        'content_type_id': get_content_type_for_model(self.model).pk,
        'save_as': self.save_as,
        'save_on_top': self.save_on_top,
        'to_field_var': TO_FIELD_VAR,
        'is_popup_var': IS_POPUP_VAR,
        'app_label': app_label,
    })
    # add_form_template only applies when actually adding; otherwise use
    # the change template.
    if add and self.add_form_template is not None:
        form_template = self.add_form_template
    else:
        form_template = self.change_form_template
    request.current_app = self.admin_site.name
    return TemplateResponse(request, form_template or [
        "admin/%s/%s/change_form.html" % (app_label, opts.model_name),
        "admin/%s/change_form.html" % app_label,
        "admin/change_form.html"
    ], context)
def response_add(self, request, obj, post_url_continue=None):
    """
    Determines the HttpResponse for the add_view stage.

    Branches on which submit button was pressed (popup, "Save and
    continue editing", "Save as new", "Save and add another", plain
    "Save") and returns the corresponding redirect or popup response.
    """
    opts = obj._meta
    pk_value = obj._get_pk_val()
    preserved_filters = self.get_preserved_filters(request)
    obj_url = reverse(
        'admin:%s_%s_change' % (opts.app_label, opts.model_name),
        args=(quote(pk_value),),
        current_app=self.admin_site.name,
    )
    # Add a link to the object's change form if the user can edit the obj.
    if self.has_change_permission(request, obj):
        obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
    else:
        obj_repr = force_text(obj)
    msg_dict = {
        'name': force_text(opts.verbose_name),
        'obj': obj_repr,
    }
    # Here, we distinguish between different save types by checking for
    # the presence of keys in request.POST.
    if IS_POPUP_VAR in request.POST:
        # Popup add (e.g. raw_id/foreign-key widget): hand the new value
        # back to the opener via the popup_response template.
        to_field = request.POST.get(TO_FIELD_VAR)
        if to_field:
            attr = str(to_field)
        else:
            attr = obj._meta.pk.attname
        value = obj.serializable_value(attr)
        popup_response_data = json.dumps({
            'value': six.text_type(value),
            'obj': six.text_type(obj),
        })
        return SimpleTemplateResponse('admin/popup_response.html', {
            'popup_response_data': popup_response_data,
        })
    elif "_continue" in request.POST or (
            # Redirecting after "Save as new".
            "_saveasnew" in request.POST and self.save_as_continue and
            self.has_change_permission(request, obj)
    ):
        msg = format_html(
            _('The {name} "{obj}" was added successfully. You may edit it again below.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        if post_url_continue is None:
            post_url_continue = obj_url
        post_url_continue = add_preserved_filters(
            {'preserved_filters': preserved_filters, 'opts': opts},
            post_url_continue
        )
        return HttpResponseRedirect(post_url_continue)
    elif "_addanother" in request.POST:
        msg = format_html(
            _('The {name} "{obj}" was added successfully. You may add another {name} below.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        # Back to a fresh add form at the same URL.
        redirect_url = request.path
        redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
        return HttpResponseRedirect(redirect_url)
    else:
        msg = format_html(
            _('The {name} "{obj}" was added successfully.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
    """
    Determines the HttpResponse for the change_view stage.

    Mirrors response_add(): branches on the submit button pressed
    (popup, "Save and continue", "Save as new", "Save and add another",
    plain "Save") and returns the matching redirect or popup response.
    """
    if IS_POPUP_VAR in request.POST:
        # Popup edit: report the changed value back to the opener.
        to_field = request.POST.get(TO_FIELD_VAR)
        attr = str(to_field) if to_field else obj._meta.pk.attname
        # Retrieve the `object_id` from the resolved pattern arguments.
        value = request.resolver_match.args[0]
        new_value = obj.serializable_value(attr)
        popup_response_data = json.dumps({
            'action': 'change',
            'value': six.text_type(value),
            'obj': six.text_type(obj),
            'new_value': six.text_type(new_value),
        })
        return SimpleTemplateResponse('admin/popup_response.html', {
            'popup_response_data': popup_response_data,
        })
    opts = self.model._meta
    pk_value = obj._get_pk_val()
    preserved_filters = self.get_preserved_filters(request)
    msg_dict = {
        'name': force_text(opts.verbose_name),
        'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
    }
    if "_continue" in request.POST:
        msg = format_html(
            _('The {name} "{obj}" was changed successfully. You may edit it again below.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        redirect_url = request.path
        redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
        return HttpResponseRedirect(redirect_url)
    elif "_saveasnew" in request.POST:
        # The edit was saved as a brand-new object; go to its change form.
        msg = format_html(
            _('The {name} "{obj}" was added successfully. You may edit it again below.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        redirect_url = reverse('admin:%s_%s_change' %
                               (opts.app_label, opts.model_name),
                               args=(pk_value,),
                               current_app=self.admin_site.name)
        redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
        return HttpResponseRedirect(redirect_url)
    elif "_addanother" in request.POST:
        msg = format_html(
            _('The {name} "{obj}" was changed successfully. You may add another {name} below.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        redirect_url = reverse('admin:%s_%s_add' %
                               (opts.app_label, opts.model_name),
                               current_app=self.admin_site.name)
        redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
        return HttpResponseRedirect(redirect_url)
    else:
        msg = format_html(
            _('The {name} "{obj}" was changed successfully.'),
            **msg_dict
        )
        self.message_user(request, msg, messages.SUCCESS)
        return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
    """
    Figure out where to redirect after the 'Save' button has been pressed
    when adding a new object.

    Users with change permission go back to the changelist (with any
    preserved filters re-applied); everyone else lands on the admin index.
    """
    opts = self.model._meta
    if not self.has_change_permission(request, None):
        # No changelist access: fall back to the admin index page.
        return HttpResponseRedirect(
            reverse('admin:index', current_app=self.admin_site.name))
    changelist = reverse(
        'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
        current_app=self.admin_site.name,
    )
    changelist = add_preserved_filters(
        {'preserved_filters': self.get_preserved_filters(request), 'opts': opts},
        changelist,
    )
    return HttpResponseRedirect(changelist)
def response_post_save_change(self, request, obj):
    """
    Figure out where to redirect after the 'Save' button has been pressed
    when editing an existing object.

    Users with change permission go back to the changelist (with any
    preserved filters re-applied); everyone else lands on the admin index.
    """
    opts = self.model._meta
    if not self.has_change_permission(request, None):
        # No changelist access: fall back to the admin index page.
        return HttpResponseRedirect(
            reverse('admin:index', current_app=self.admin_site.name))
    changelist = reverse(
        'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
        current_app=self.admin_site.name,
    )
    changelist = add_preserved_filters(
        {'preserved_filters': self.get_preserved_filters(request), 'opts': opts},
        changelist,
    )
    return HttpResponseRedirect(changelist)
def response_action(self, request, queryset):
    """
    Handle an admin action. This is called if a request is POSTed to the
    changelist; it returns an HttpResponse if the action was handled, and
    None otherwise.
    """
    # There can be multiple action forms on the page (at the top
    # and bottom of the change list, for example). Get the action
    # whose button was pushed.
    try:
        action_index = int(request.POST.get('index', 0))
    except ValueError:
        action_index = 0
    # Construct the action form.
    data = request.POST.copy()
    # Strip the row checkboxes and the form index so they don't interfere
    # with action-form validation.
    data.pop(helpers.ACTION_CHECKBOX_NAME, None)
    data.pop("index", None)
    # Use the action whose button was pushed
    try:
        data.update({'action': data.getlist('action')[action_index]})
    except IndexError:
        # If we didn't get an action from the chosen form that's invalid
        # POST data, so by deleting action it'll fail the validation check
        # below. So no need to do anything here
        pass
    action_form = self.action_form(data, auto_id=None)
    action_form.fields['action'].choices = self.get_action_choices(request)
    # If the form's valid we can handle the action.
    if action_form.is_valid():
        action = action_form.cleaned_data['action']
        select_across = action_form.cleaned_data['select_across']
        func = self.get_actions(request)[action][0]
        # Get the list of selected PKs. If nothing's selected, we can't
        # perform an action on it, so bail. Except we want to perform
        # the action explicitly on all objects.
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        if not selected and not select_across:
            # Reminder that something needs to be selected or nothing will happen
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg, messages.WARNING)
            return None
        if not select_across:
            # Perform the action only on the selected objects
            queryset = queryset.filter(pk__in=selected)
        response = func(self, request, queryset)
        # Actions may return an HttpResponse-like object, which will be
        # used as the response from the POST. If not, we'll be a good
        # little HTTP citizen and redirect back to the changelist page.
        if isinstance(response, HttpResponseBase):
            return response
        else:
            return HttpResponseRedirect(request.get_full_path())
    else:
        msg = _("No action selected.")
        self.message_user(request, msg, messages.WARNING)
        return None
def response_delete(self, request, obj_display, obj_id):
    """
    Determines the HttpResponse for the delete_view stage.

    ``obj_display`` is the string representation of the deleted object
    (captured before deletion) and ``obj_id`` its serialized identifier.
    """
    opts = self.model._meta
    if IS_POPUP_VAR in request.POST:
        # Deletion initiated from a popup: notify the opener window.
        popup_response_data = json.dumps({
            'action': 'delete',
            'value': str(obj_id),
        })
        return SimpleTemplateResponse('admin/popup_response.html', {
            'popup_response_data': popup_response_data,
        })
    self.message_user(
        request,
        _('The %(name)s "%(obj)s" was deleted successfully.') % {
            'name': force_text(opts.verbose_name),
            'obj': force_text(obj_display),
        },
        messages.SUCCESS,
    )
    # Redirect to the changelist when permitted, otherwise the admin index.
    if self.has_change_permission(request, None):
        post_url = reverse(
            'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
            current_app=self.admin_site.name,
        )
        preserved_filters = self.get_preserved_filters(request)
        post_url = add_preserved_filters(
            {'preserved_filters': preserved_filters, 'opts': opts}, post_url
        )
    else:
        post_url = reverse('admin:index', current_app=self.admin_site.name)
    return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
    """
    Render the delete-confirmation page for this model.

    Template resolution falls back from the model-specific template to the
    app-level one and finally to the generic admin template, unless
    ``delete_confirmation_template`` is configured.
    """
    opts = self.model._meta
    app_label = opts.app_label
    request.current_app = self.admin_site.name
    context.update(
        to_field_var=TO_FIELD_VAR,
        is_popup_var=IS_POPUP_VAR,
        media=self.media,
    )
    return TemplateResponse(
        request,
        self.delete_confirmation_template or [
            "admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
            "admin/{}/delete_confirmation.html".format(app_label),
            "admin/delete_confirmation.html",
        ],
        context,
    )
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
    """
    Pair each inline admin instance with its formset and wrap the pair
    in a helpers.InlineAdminFormSet carrying the inline's fieldsets,
    prepopulated fields and read-only fields, ready for rendering.
    """
    wrapped = []
    for inline, formset in zip(inline_instances, formsets):
        inline_fieldsets = list(inline.get_fieldsets(request, obj))
        inline_readonly = list(inline.get_readonly_fields(request, obj))
        inline_prepopulated = dict(inline.get_prepopulated_fields(request, obj))
        wrapped.append(helpers.InlineAdminFormSet(
            inline, formset, inline_fieldsets, inline_prepopulated,
            inline_readonly, model_admin=self,
        ))
    return wrapped
def get_changeform_initial_data(self, request):
    """
    Get the initial form data.
    Unless overridden, this populates from the GET params.
    """
    initial = dict(request.GET.items())
    for k in initial:
        try:
            f = self.model._meta.get_field(k)
        except FieldDoesNotExist:
            # GET parameter that isn't a model field (e.g.
            # _changelist_filters); leave its value untouched.
            continue
        # We have to special-case M2Ms as a list of comma-separated PKs.
        if isinstance(f, models.ManyToManyField):
            initial[k] = initial[k].split(",")
    return initial
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
    """
    CSRF-protected entry point for add/change views.

    Runs the real work (_changeform_view) inside a transaction on the
    model's write database so a failure during save rolls back the
    object and any inline formsets together.
    """
    with transaction.atomic(using=router.db_for_write(self.model)):
        return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
    """
    Shared implementation of the add and change views.

    ``object_id`` is None when adding. Handles permission checks, form
    and inline-formset validation, saving, logging and the final render
    of the change form.
    """
    to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
    if to_field and not self.to_field_allowed(request, to_field):
        raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
    model = self.model
    opts = model._meta
    # "Save as new" turns a change submission into an add submission.
    if request.method == 'POST' and '_saveasnew' in request.POST:
        object_id = None
    add = object_id is None
    if add:
        if not self.has_add_permission(request):
            raise PermissionDenied
        obj = None
    else:
        obj = self.get_object(request, unquote(object_id), to_field)
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(opts.verbose_name), 'key': escape(object_id)})
    ModelForm = self.get_form(request, obj)
    if request.method == 'POST':
        form = ModelForm(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            form_validated = True
            new_object = self.save_form(request, form, change=not add)
        else:
            form_validated = False
            # Keep an unsaved instance around so inline formsets can
            # still be bound and re-rendered with errors.
            new_object = form.instance
        formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
        # Only save when the parent form AND every inline formset are valid.
        if all_valid(formsets) and form_validated:
            self.save_model(request, new_object, form, not add)
            self.save_related(request, form, formsets, not add)
            change_message = self.construct_change_message(request, form, formsets, add)
            if add:
                self.log_addition(request, new_object, change_message)
                return self.response_add(request, new_object)
            else:
                self.log_change(request, new_object, change_message)
                return self.response_change(request, new_object)
        else:
            form_validated = False
    else:
        # GET: build an unbound form (with initial data when adding).
        if add:
            initial = self.get_changeform_initial_data(request)
            form = ModelForm(initial=initial)
            formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
        else:
            form = ModelForm(instance=obj)
            formsets, inline_instances = self._create_formsets(request, obj, change=True)
    adminForm = helpers.AdminForm(
        form,
        list(self.get_fieldsets(request, obj)),
        self.get_prepopulated_fields(request, obj),
        self.get_readonly_fields(request, obj),
        model_admin=self)
    media = self.media + adminForm.media
    inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
    for inline_formset in inline_formsets:
        media = media + inline_formset.media
    context = dict(
        self.admin_site.each_context(request),
        title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
        adminform=adminForm,
        object_id=object_id,
        original=obj,
        is_popup=(IS_POPUP_VAR in request.POST or
                  IS_POPUP_VAR in request.GET),
        to_field=to_field,
        media=media,
        inline_admin_formsets=inline_formsets,
        errors=helpers.AdminErrorList(form, formsets),
        preserved_filters=self.get_preserved_filters(request),
    )
    # Hide the "Save" and "Save and continue" buttons if "Save as New" was
    # previously chosen to prevent the interface from getting confusing.
    if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
        context['show_save'] = False
        context['show_save_and_continue'] = False
        # Use the change template instead of the add template.
        add = False
    context.update(extra_context or {})
    return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
    """Display the add form: delegate to changeform_view with no object id."""
    return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
    """Display the change form: delegate to changeform_view for *object_id*."""
    return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
    """
    The 'change list' admin view for this model.

    Builds the ChangeList, dispatches POSTed bulk actions (with and
    without confirmation), processes list_editable bulk edits, and
    finally renders the changelist template.
    """
    from django.contrib.admin.views.main import ERROR_FLAG
    opts = self.model._meta
    app_label = opts.app_label
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    list_display = self.get_list_display(request)
    list_display_links = self.get_list_display_links(request, list_display)
    list_filter = self.get_list_filter(request)
    search_fields = self.get_search_fields(request)
    list_select_related = self.get_list_select_related(request)
    # Check actions to see if any are available on this changelist
    actions = self.get_actions(request)
    if actions:
        # Add the action checkboxes if there are any actions available.
        list_display = ['action_checkbox'] + list(list_display)
    ChangeList = self.get_changelist(request)
    try:
        cl = ChangeList(
            request, self.model, list_display,
            list_display_links, list_filter, self.date_hierarchy,
            search_fields, list_select_related, self.list_per_page,
            self.list_max_show_all, self.list_editable, self,
        )
    except IncorrectLookupParameters:
        # Wacky lookup parameters were given, so redirect to the main
        # changelist page, without parameters, and pass an 'invalid=1'
        # parameter via the query string. If wacky parameters were given
        # and the 'invalid=1' parameter was already in the query string,
        # something is screwed up with the database, so display an error
        # page.
        if ERROR_FLAG in request.GET.keys():
            return SimpleTemplateResponse('admin/invalid_setup.html', {
                'title': _('Database error'),
            })
        return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
    # If the request was POSTed, this might be a bulk action or a bulk
    # edit. Try to look up an action or confirmation first, but if this
    # isn't an action the POST will fall through to the bulk edit check,
    # below.
    action_failed = False
    selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
    # Actions with no confirmation
    if (actions and request.method == 'POST' and
            'index' in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_queryset(request))
            if response:
                return response
            else:
                action_failed = True
        else:
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg, messages.WARNING)
            action_failed = True
    # Actions with confirmation
    if (actions and request.method == 'POST' and
            helpers.ACTION_CHECKBOX_NAME in request.POST and
            'index' not in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_queryset(request))
            if response:
                return response
            else:
                action_failed = True
    # If we're allowing changelist editing, we need to construct a formset
    # for the changelist given all the fields to be edited. Then we'll
    # use the formset to validate/process POSTed data.
    formset = cl.formset = None
    # Handle POSTed bulk-edit data.
    if (request.method == "POST" and cl.list_editable and
            '_save' in request.POST and not action_failed):
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(request.POST, request.FILES, queryset=self.get_queryset(request))
        if formset.is_valid():
            changecount = 0
            for form in formset.forms:
                if form.has_changed():
                    obj = self.save_form(request, form, change=True)
                    self.save_model(request, obj, form, change=True)
                    self.save_related(request, form, formsets=[], change=True)
                    change_msg = self.construct_change_message(request, form, None)
                    self.log_change(request, obj, change_msg)
                    changecount += 1
            if changecount:
                # Report how many rows were edited, pluralised correctly.
                if changecount == 1:
                    name = force_text(opts.verbose_name)
                else:
                    name = force_text(opts.verbose_name_plural)
                msg = ungettext(
                    "%(count)s %(name)s was changed successfully.",
                    "%(count)s %(name)s were changed successfully.",
                    changecount
                ) % {
                    'count': changecount,
                    'name': name,
                    'obj': force_text(obj),
                }
                self.message_user(request, msg, messages.SUCCESS)
            return HttpResponseRedirect(request.get_full_path())
    # Handle GET -- construct a formset for display.
    elif cl.list_editable:
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(queryset=cl.result_list)
    # Build the list of media to be used by the formset.
    if formset:
        media = self.media + formset.media
    else:
        media = self.media
    # Build the action form and populate it with available actions.
    if actions:
        action_form = self.action_form(auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)
        media += action_form.media
    else:
        action_form = None
    selection_note_all = ungettext(
        '%(total_count)s selected',
        'All %(total_count)s selected',
        cl.result_count
    )
    context = dict(
        self.admin_site.each_context(request),
        module_name=force_text(opts.verbose_name_plural),
        selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
        selection_note_all=selection_note_all % {'total_count': cl.result_count},
        title=cl.title,
        is_popup=cl.is_popup,
        to_field=cl.to_field,
        cl=cl,
        media=media,
        has_add_permission=self.has_add_permission(request),
        opts=cl.opts,
        action_form=action_form,
        actions_on_top=self.actions_on_top,
        actions_on_bottom=self.actions_on_bottom,
        actions_selection_counter=self.actions_selection_counter,
        preserved_filters=self.get_preserved_filters(request),
    )
    context.update(extra_context or {})
    request.current_app = self.admin_site.name
    return TemplateResponse(request, self.change_list_template or [
        'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
        'admin/%s/change_list.html' % app_label,
        'admin/change_list.html'
    ], context)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
    """
    CSRF-protected entry point for the delete view.

    Runs the real work (_delete_view) inside a transaction on the
    model's write database so a failed deletion is rolled back.
    """
    with transaction.atomic(using=router.db_for_write(self.model)):
        return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
    "The 'delete' admin view for this model."
    opts = self.model._meta
    app_label = opts.app_label
    to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
    if to_field and not self.to_field_allowed(request, to_field):
        raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
    obj = self.get_object(request, unquote(object_id), to_field)
    if not self.has_delete_permission(request, obj):
        raise PermissionDenied
    if obj is None:
        raise Http404(
            _('%(name)s object with primary key %(key)r does not exist.') %
            {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
        )
    using = router.db_for_write(self.model)
    # Populate deleted_objects, a data structure of all related objects that
    # will also be deleted.
    (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
        [obj], opts, request.user, self.admin_site, using)
    if request.POST and not protected:  # The user has confirmed the deletion.
        if perms_needed:
            # Related objects exist that the user lacks permission to delete.
            raise PermissionDenied
        # Capture the display string and id before the object is gone.
        obj_display = force_text(obj)
        attr = str(to_field) if to_field else opts.pk.attname
        obj_id = obj.serializable_value(attr)
        self.log_deletion(request, obj, obj_display)
        self.delete_model(request, obj)
        return self.response_delete(request, obj_display, obj_id)
    # GET (or protected objects): show the confirmation page instead.
    object_name = force_text(opts.verbose_name)
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": object_name}
    else:
        title = _("Are you sure?")
    context = dict(
        self.admin_site.each_context(request),
        title=title,
        object_name=object_name,
        object=obj,
        deleted_objects=deleted_objects,
        model_count=dict(model_count).items(),
        perms_lacking=perms_needed,
        protected=protected,
        opts=opts,
        app_label=app_label,
        preserved_filters=self.get_preserved_filters(request),
        is_popup=(IS_POPUP_VAR in request.POST or
                  IS_POPUP_VAR in request.GET),
        to_field=to_field,
    )
    context.update(extra_context or {})
    return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
    "The 'history' admin view for this model."
    from django.contrib.admin.models import LogEntry
    # First check if the user can see this history.
    model = self.model
    obj = self.get_object(request, unquote(object_id))
    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
            'name': force_text(model._meta.verbose_name),
            'key': escape(object_id),
        })
    if not self.has_change_permission(request, obj):
        raise PermissionDenied
    # Then get the history for this object: all LogEntry rows recorded for
    # this model/object id, oldest first.
    opts = model._meta
    app_label = opts.app_label
    action_list = LogEntry.objects.filter(
        object_id=unquote(object_id),
        content_type=get_content_type_for_model(model)
    ).select_related().order_by('action_time')
    context = dict(
        self.admin_site.each_context(request),
        title=_('Change history: %s') % force_text(obj),
        action_list=action_list,
        module_name=capfirst(force_text(opts.verbose_name_plural)),
        object=obj,
        opts=opts,
        preserved_filters=self.get_preserved_filters(request),
    )
    context.update(extra_context or {})
    request.current_app = self.admin_site.name
    # Model-specific template first, then app-level, then the generic one.
    return TemplateResponse(request, self.object_history_template or [
        "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
        "admin/%s/object_history.html" % app_label,
        "admin/object_history.html"
    ], context)
def _create_formsets(self, request, obj, change):
    "Helper function to generate formsets for add/change_view."
    formsets = []
    inline_instances = []
    prefixes = {}
    get_formsets_args = [request]
    if change:
        # Only pass the object along when editing; on add there is no
        # saved instance for get_formsets_with_inlines() to inspect.
        get_formsets_args.append(obj)
    for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
        prefix = FormSet.get_default_prefix()
        # Track how many times each default prefix has been seen so
        # repeated (or empty) prefixes are disambiguated with a counter.
        prefixes[prefix] = prefixes.get(prefix, 0) + 1
        if prefixes[prefix] != 1 or not prefix:
            prefix = "%s-%s" % (prefix, prefixes[prefix])
        formset_params = {
            'instance': obj,
            'prefix': prefix,
            'queryset': inline.get_queryset(request),
        }
        if request.method == 'POST':
            # Bind POSTed data/files; '_saveasnew' tells the formset to
            # re-save its forms against the new parent object.
            formset_params.update({
                'data': request.POST,
                'files': request.FILES,
                'save_as_new': '_saveasnew' in request.POST
            })
        formsets.append(FormSet(**formset_params))
        inline_instances.append(inline)
    return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.
    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
    from ``model`` to its parent. This is required if ``model`` has more than
    one ``ForeignKey`` to its parent.
    """
    # The inline model class; must be set by subclasses.
    model = None
    # Name of the ForeignKey to the parent (needed when there are several).
    fk_name = None
    # Formset class used to build the inline forms.
    formset = BaseInlineFormSet
    # Number of extra (empty) forms displayed; see get_extra().
    extra = 3
    # Minimum / maximum number of inline forms; see get_min_num()/get_max_num().
    min_num = None
    max_num = None
    # Template used to render this inline; None means the default.
    template = None
    # Display names; default to the model's verbose names (set in __init__).
    verbose_name = None
    verbose_name_plural = None
    # Whether inline rows may be deleted from the parent form.
    can_delete = True
    # Whether each row shows a link to its own change form.
    show_change_link = False
    checks_class = InlineModelAdminChecks
    # Extra CSS classes for the fieldset (e.g. 'collapse'); see media.
    classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
if self.classes and 'collapse' in self.classes:
js.append('collapse%s.js' % extra)
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
    # Renders each inline form as a full, stacked fieldset.
    template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
    # Renders the inline forms as rows of a compact table.
    template = 'admin/edit_inline/tabular.html'
| bsd-3-clause |
shs96c/buck | python-dsl/buck_parser/module_whitelist.py | 1 | 6196 | from __future__ import absolute_import, division, print_function, with_statement
import __builtin__
import contextlib
import imp
import inspect
from __builtin__ import __import__ as ORIGINAL_IMPORT
from . import util
class ImportWhitelistManager(object):
    """Restricts ``import`` inside build files to whitelisted or "safe" modules.

    Works by swapping ``__builtin__.__import__`` for :meth:`_custom_import`
    while a build file is being evaluated (see :meth:`allow_unsafe_import`).
    """

    def __init__(self, import_whitelist, safe_modules_config, path_predicate):
        """
        :param (str)->bool path_predicate:
            Predicate returning whether the import whitelist applies to imports from a particular
            file.
        :param set[str] import_whitelist: modules which can be imported without restriction.
        :param dict[str,list[str]] safe_modules_config:
            Whitelisted parts for specified module.
            Supports submodules, e.g. for a safe version of module 'foo' with submodule 'bar'
            specify ``{'foo': ['bar', 'fun1', 'fun2'], 'foo.bar': ['fun3', 'fun4']}``.
        """
        self._import_whitelist = frozenset(import_whitelist)
        self._safe_modules_config = safe_modules_config
        self._path_predicate = path_predicate
        self._safe_modules = {}  #: cache of safe modules created via config.

    @contextlib.contextmanager
    def allow_unsafe_import(self, allow=True):
        """Controls behavior of 'import' in a context.

        :param bool allow: whether default 'import' behavior should be allowed in the context
        """
        # Override '__import__' function. It might have already been overridden if current file
        # was included by other build file, original '__import__' is stored in 'ORIGINAL_IMPORT'.
        previous_import = __builtin__.__import__
        if allow:
            __builtin__.__import__ = ORIGINAL_IMPORT
        else:
            __builtin__.__import__ = self._custom_import
        try:
            yield
        finally:
            # Restore previous '__builtin__.__import__'
            __builtin__.__import__ = previous_import

    def _custom_import(self, name, globals=None, locals=None, fromlist=(), level=-1):
        """Custom '__import__' function.

        Returns safe version of a module if configured in `_safe_module_config`.
        Otherwise, returns standard module if the module is whitelisted.
        Otherwise, blocks importing other modules.
        """
        if not fromlist:
            # Return the top-level package if 'fromlist' is empty (e.g. 'os' for 'os.path'),
            # which is how '__import__' works.
            name = name.split(".")[0]
        # Identify the file that triggered the import so the predicate can
        # decide whether restrictions apply to it.
        frame = util.get_caller_frame(skip=[__name__])
        filename = inspect.getframeinfo(frame).filename
        # The import will be always allowed if it was not called from a project file.
        if name in self._import_whitelist or not self._path_predicate(filename):
            # Importing a module may cause more '__import__' calls if the module uses other
            # modules. Such calls should not be blocked if the top-level import was allowed.
            with self.allow_unsafe_import():
                return ORIGINAL_IMPORT(name, globals, locals, fromlist, level)
        # Return safe version of the module if possible
        if name in self._safe_modules_config:
            return self._get_safe_module(name)
        raise ImportError(
            "Importing module {0} is forbidden. "
            "If you really need to import this module, read about "
            "the allow_unsafe_import() function documented at: "
            "https://buckbuild.com/function/allow_unsafe_import.html".format(name)
        )

    @staticmethod
    def _block_unsafe_function(module, name):
        """Returns a function that ignores any arguments and raises AttributeError."""
        def func(*args, **kwargs):
            raise AttributeError(
                "Using function {0} is forbidden in the safe version of "
                "module {1}. If you really need to use this function read about "
                "allow_unsafe_import() documented at: "
                "https://buckbuild.com/function/allow_unsafe_import.html".format(
                    name, module
                )
            )
        return func

    def _install_whitelisted_parts(self, mod, safe_mod, whitelist):
        """Copy whitelisted globals from a module to its safe version.

        Functions not on the whitelist are blocked to show a more meaningful error.
        """
        mod_name = safe_mod.__name__
        whitelist_set = set(whitelist)
        for name in mod.__dict__:
            if name in whitelist_set:
                # Check if a safe version is defined in case it's a submodule.
                # If it's not defined the original submodule will be copied.
                submodule_name = mod_name + "." + name
                if submodule_name in self._safe_modules_config:
                    # Get a safe version of the submodule
                    safe_mod.__dict__[name] = self._get_safe_module(submodule_name)
                else:
                    safe_mod.__dict__[name] = mod.__dict__[name]
            elif callable(mod.__dict__[name]):
                # Non-whitelisted callables are replaced with error stubs;
                # non-callable globals are simply omitted.
                safe_mod.__dict__[name] = self._block_unsafe_function(mod_name, name)

    def _get_safe_module(self, name):
        """Returns a safe version of the module."""
        assert name in self._safe_modules_config, (
            "Safe version of module %s is not configured." % name
        )
        # Return the safe version of the module if already created
        if name in self._safe_modules:
            return self._safe_modules[name]
        # Get the normal module, non-empty 'fromlist' prevents returning top-level package
        # (e.g. 'os' would be returned for 'os.path' without it)
        with self.allow_unsafe_import():
            mod = ORIGINAL_IMPORT(name, fromlist=[""])
        # Build a new module for the safe version
        safe_mod = imp.new_module(name)
        # Install whitelisted parts of the module, block the rest to produce errors
        # informing about the safe version.
        self._install_whitelisted_parts(mod, safe_mod, self._safe_modules_config[name])
        # Store the safe version of the module
        self._safe_modules[name] = safe_mod
        return safe_mod
| apache-2.0 |
Belxjander/Kirito | Python-3.5.0-Amiga/Lib/test/test_importlib/test_locks.py | 8 | 5481 | from . import util as test_util
init = test_util.import_importlib('importlib')
import sys
import time
import unittest
import weakref
from test import support
try:
import threading
except ImportError:
threading = None
else:
from test import lock_tests
# Run the module-lock-as-RLock tests only when threading is available;
# otherwise define empty placeholder TestCases so discovery still works.
if threading is not None:
    class ModuleLockAsRLockTests:
        locktype = classmethod(lambda cls: cls.LockType("some_lock"))
        # The RLockTests below exercise APIs a _ModuleLock does not implement;
        # setting them to None disables the inherited test methods.
        # _is_owned() unsupported
        test__is_owned = None
        # acquire(blocking=False) unsupported
        test_try_acquire = None
        test_try_acquire_contended = None
        # `with` unsupported
        test_with = None
        # acquire(timeout=...) unsupported
        test_timeout = None
        # _release_save() unsupported
        test_release_save_unacquired = None
        # lock status in repr unsupported
        test_repr = None
        test_locked_repr = None
    # One lock type per importlib flavor (frozen vs. source bootstrap).
    LOCK_TYPES = {kind: splitinit._bootstrap._ModuleLock
                  for kind, splitinit in init.items()}
    (Frozen_ModuleLockAsRLockTests,
     Source_ModuleLockAsRLockTests
     ) = test_util.test_both(ModuleLockAsRLockTests, lock_tests.RLockTests,
                             LockType=LOCK_TYPES)
else:
    LOCK_TYPES = {}
    class Frozen_ModuleLockAsRLockTests(unittest.TestCase):
        pass
    class Source_ModuleLockAsRLockTests(unittest.TestCase):
        pass
if threading is not None:
    class DeadlockAvoidanceTests:
        def setUp(self):
            try:
                # Shrink the interpreter's thread switch interval so lock
                # contention between the worker threads is actually exercised.
                self.old_switchinterval = sys.getswitchinterval()
                sys.setswitchinterval(0.000001)
            except AttributeError:
                self.old_switchinterval = None
        def tearDown(self):
            if self.old_switchinterval is not None:
                sys.setswitchinterval(self.old_switchinterval)
        def run_deadlock_avoidance_test(self, create_deadlock):
            """Spin up threads acquiring locks in a ring; return (ra, rb) results.

            With NTHREADS == NLOCKS every lock pair is contended (a deadlock
            cycle); with one fewer thread the cycle is broken.
            """
            NLOCKS = 10
            locks = [self.LockType(str(i)) for i in range(NLOCKS)]
            pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)]
            if create_deadlock:
                NTHREADS = NLOCKS
            else:
                NTHREADS = NLOCKS - 1
            barrier = threading.Barrier(NTHREADS)
            results = []
            def _acquire(lock):
                """Try to acquire the lock. Return True on success,
                False on deadlock."""
                try:
                    lock.acquire()
                except self.DeadlockError:
                    return False
                else:
                    return True
            def f():
                a, b = pairs.pop()
                ra = _acquire(a)
                # Wait until every thread holds its first lock before taking
                # the second, to force the contention pattern.
                barrier.wait()
                rb = _acquire(b)
                results.append((ra, rb))
                if rb:
                    b.release()
                if ra:
                    a.release()
            lock_tests.Bunch(f, NTHREADS).wait_for_finished()
            self.assertEqual(len(results), NTHREADS)
            return results
        def test_deadlock(self):
            results = self.run_deadlock_avoidance_test(True)
            # At least one of the threads detected a potential deadlock on its
            # second acquire() call. It may be several of them, because the
            # deadlock avoidance mechanism is conservative.
            nb_deadlocks = results.count((True, False))
            self.assertGreaterEqual(nb_deadlocks, 1)
            self.assertEqual(results.count((True, True)), len(results) - nb_deadlocks)
        def test_no_deadlock(self):
            results = self.run_deadlock_avoidance_test(False)
            self.assertEqual(results.count((True, False)), 0)
            self.assertEqual(results.count((True, True)), len(results))
    # One deadlock-error type per importlib flavor, mirroring LOCK_TYPES.
    DEADLOCK_ERRORS = {kind: splitinit._bootstrap._DeadlockError
                       for kind, splitinit in init.items()}
    (Frozen_DeadlockAvoidanceTests,
     Source_DeadlockAvoidanceTests
     ) = test_util.test_both(DeadlockAvoidanceTests,
                             LockType=LOCK_TYPES,
                             DeadlockError=DEADLOCK_ERRORS)
else:
    DEADLOCK_ERRORS = {}
    class Frozen_DeadlockAvoidanceTests(unittest.TestCase):
        pass
    class Source_DeadlockAvoidanceTests(unittest.TestCase):
        pass
class LifetimeTests:
    @property
    def bootstrap(self):
        # The importlib bootstrap module under test (frozen or source).
        return self.init._bootstrap
    def test_lock_lifetime(self):
        """A module lock should disappear once nothing references it."""
        name = "xyzzy"
        self.assertNotIn(name, self.bootstrap._module_locks)
        lock = self.bootstrap._get_module_lock(name)
        self.assertIn(name, self.bootstrap._module_locks)
        wr = weakref.ref(lock)
        del lock
        support.gc_collect()
        # Dropping the last strong reference must purge the registry entry.
        self.assertNotIn(name, self.bootstrap._module_locks)
        self.assertIsNone(wr())
    def test_all_locks(self):
        """After collection no module locks should remain registered."""
        support.gc_collect()
        self.assertEqual(0, len(self.bootstrap._module_locks),
                         self.bootstrap._module_locks)
(Frozen_LifetimeTests,
 Source_LifetimeTests
 ) = test_util.test_both(LifetimeTests, init=init)
@support.reap_threads
def test_main():
    """Run all frozen/source test case variants defined in this module."""
    support.run_unittest(Frozen_ModuleLockAsRLockTests,
                         Source_ModuleLockAsRLockTests,
                         Frozen_DeadlockAvoidanceTests,
                         Source_DeadlockAvoidanceTests,
                         Frozen_LifetimeTests,
                         Source_LifetimeTests)
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
jk1/intellij-community | plugins/hg4idea/testData/bin/hgext/largefiles/__init__.py | 91 | 4744 | # Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''track large binary files
Large binary files tend to be not very compressible, not very
diffable, and not at all mergeable. Such files are not handled
efficiently by Mercurial's storage format (revlog), which is based on
compressed binary deltas; storing large binary files as regular
Mercurial files wastes bandwidth and disk space and increases
Mercurial's memory usage. The largefiles extension addresses these
problems by adding a centralized client-server layer on top of
Mercurial: largefiles live in a *central store* out on the network
somewhere, and you only fetch the revisions that you need when you
need them.
largefiles works by maintaining a "standin file" in .hglf/ for each
largefile. The standins are small (41 bytes: an SHA-1 hash plus
newline) and are tracked by Mercurial. Largefile revisions are
identified by the SHA-1 hash of their contents, which is written to
the standin. largefiles uses that revision ID to get/put largefile
revisions from/to the central store. This saves both disk space and
bandwidth, since you don't need to retrieve all historical revisions
of large files when you clone or pull.
To start a new repository or add new large binary files, just add
--large to your :hg:`add` command. For example::
$ dd if=/dev/urandom of=randomdata count=2000
$ hg add --large randomdata
$ hg commit -m 'add randomdata as a largefile'
When you push a changeset that adds/modifies largefiles to a remote
repository, its largefile revisions will be uploaded along with it.
Note that the remote Mercurial must also have the largefiles extension
enabled for this to work.
When you pull a changeset that affects largefiles from a remote
repository, the largefiles for the changeset will by default not be
pulled down. However, when you update to such a revision, any
largefiles needed by that revision are downloaded and cached (if
they have never been downloaded before). One way to pull largefiles
when pulling is thus to use --update, which will update your working
copy to the latest pulled revision (thereby downloading any new
largefiles).
If you want to pull largefiles you don't need for update yet, then
you can use pull with the `--lfrev` option or the :hg:`lfpull` command.
If you know you are pulling from a non-default location and want to
download all the largefiles that correspond to the new changesets at
the same time, then you can pull with `--lfrev "pulled()"`.
If you just want to ensure that you will have the largefiles needed to
merge or rebase with new heads that you are pulling, then you can pull
with `--lfrev "head(pulled())"` flag to pre-emptively download any largefiles
that are new in the heads you are pulling.
Keep in mind that network access may now be required to update to
changesets that you have not previously updated to. The nature of the
largefiles extension means that updating is no longer guaranteed to
be a local-only operation.
If you already have large files tracked by Mercurial without the
largefiles extension, you will need to convert your repository in
order to benefit from largefiles. This is done with the
:hg:`lfconvert` command::
$ hg lfconvert --size 10 oldrepo newrepo
In repositories that already have largefiles in them, any new file
over 10MB will automatically be added as a largefile. To change this
threshold, set ``largefiles.minsize`` in your Mercurial config file
to the minimum size in megabytes to track as a largefile, or use the
--lfsize option to the add command (also in megabytes)::
[largefiles]
minsize = 2
$ hg add --lfsize 2
The ``largefiles.patterns`` config option allows you to specify a list
of filename patterns (see :hg:`help patterns`) that should always be
tracked as largefiles::
[largefiles]
patterns =
*.jpg
re:.*\.(png|bmp)$
library.zip
content/audio/*
Files that match one of these patterns will be added as largefiles
regardless of their size.
The ``largefiles.minsize`` and ``largefiles.patterns`` config options
will be ignored for any repositories not already containing a
largefile. To add the first largefile to a repository, you must
explicitly do so with the --large flag passed to the :hg:`add`
command.
'''
from mercurial import commands
import lfcommands
import reposetup
import uisetup
testedwith = 'internal'
# Expose the hook entry points: rebind the imported submodules to the
# functions of the same name that they define.
reposetup = reposetup.reposetup
uisetup = uisetup.uisetup
# lfconvert must be runnable outside of any repository.
commands.norepo += " lfconvert"
cmdtable = lfcommands.cmdtable
| apache-2.0 |
ta2-1/pootle | pytest_pootle/fixtures/contributors.py | 3 | 3708 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
# Keyword-argument scenarios for querying contributor statistics; each key is
# the pytest param id used by the contributors_kwargs fixture below.
CONTRIBUTORS_KWARGS = dict(
    noargs={},
    projects=dict(project_codes=[u"project0"]),
    languages=dict(language_codes=[u"language0"]),
    projects_and_languages=dict(
        project_codes=[u"project0"],
        language_codes=[u"language0"]),
    since=dict(since="2000-11-10"),
    until=dict(until="2000-11-10"),
    since_and_until=dict(since="2000-11-10", until="2000-11-10"),
    sort_by=dict(sort_by="contributions"))

# Canned contributor records returned by dummy_email_contributors. Full names
# deliberately include quotes, angle brackets, '@' and commas to exercise
# escaping in consumers.
CONTRIBUTORS_WITH_EMAIL = OrderedDict((
    ('admin', {
        'username': 'admin',
        'full_name': '',
        'email': '',
    }),
    ('member', {
        'username': 'member',
        'full_name': '',
        'email': 'member@membership.us',
    }),
    ('funkymember', {
        'username': 'funkymember',
        'full_name': 'Funky " member with <> and @ and stuff',
        'email': 'funky_member@membership.dk',
    }),
    ('fullmember', {
        'username': 'fullmember',
        'full_name': 'Just a member',
        'email': 'full_member@membership.fr',
    }),
    ('comma_member', {
        'username': 'comma_member',
        'full_name': 'Member, with comma',
        'email': 'comma_member@membership.de',
    }),
    ))
@pytest.fixture
def default_contributors_kwargs():
    """Default keyword arguments understood by the contributors utility."""
    return OrderedDict(
        (("include_anon", False),
         ("since", None),
         ("until", None),
         ("project_codes", None),
         ("language_codes", None),
         ("sort_by", "username"),
         ("mailmerge", False)))
@pytest.fixture(params=CONTRIBUTORS_KWARGS)
def contributors_kwargs(request):
    """Parametrized fixture yielding each scenario from CONTRIBUTORS_KWARGS."""
    return CONTRIBUTORS_KWARGS[request.param]
@pytest.fixture
def dummy_contributors(request, default_contributors_kwargs):
    """Replace the contributors provider with a dummy that echoes its kwargs.

    Existing delegate receivers are detached for the duration of the test and
    restored by a finalizer.
    """
    from pootle.core.delegate import contributors
    from pootle.core.plugin import getter
    from pootle_statistics.utils import Contributors
    orig_receivers = contributors.receivers
    contributors.receivers = []
    class DummyContributors(Contributors):
        @property
        def contributors(self):
            # Hack the output to get back our kwargs.
            _result_kwargs = OrderedDict()
            for k in default_contributors_kwargs.keys():
                _result_kwargs[k] = dict(
                    full_name=k,
                    contributions=getattr(
                        self, k, default_contributors_kwargs[k]))
            return _result_kwargs
    @getter(contributors, weak=False)
    def get_dummy_contribs_(**kwargs_):
        return DummyContributors
    def _reset_contributors():
        # Restore the original delegate receivers after the test.
        contributors.receivers = orig_receivers
    request.addfinalizer(_reset_contributors)
@pytest.fixture
def dummy_email_contributors(request):
    """Replace the contributors provider with one serving CONTRIBUTORS_WITH_EMAIL.

    Existing delegate receivers are detached for the duration of the test and
    restored by a finalizer.
    """
    from pootle.core.delegate import contributors
    from pootle.core.plugin import getter
    from pootle_statistics.utils import Contributors
    orig_receivers = contributors.receivers
    contributors.receivers = []
    class DummyContributors(Contributors):
        @property
        def contributors(self):
            # Order records case-insensitively by username.
            return OrderedDict(
                sorted(CONTRIBUTORS_WITH_EMAIL.items(),
                       key=lambda x: str.lower(x[1]['username'])))
    @getter(contributors, weak=False)
    def get_dummy_contribs_(**kwargs_):
        return DummyContributors
    def _reset_contributors():
        # Restore the original delegate receivers after the test.
        contributors.receivers = orig_receivers
    request.addfinalizer(_reset_contributors)
| gpl-3.0 |
avmelnikoff/RIOT | dist/tests/if_lib/driver_manager.py | 1 | 1305 | # Copyright (C) 2018 Kevin Weiss <kevin.weiss@haw-hamburg.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
"""@package PyToAPI
This module assigns the drivers to the devices.
"""
import logging
from .serial_driver import SerialDriver
from .riot_driver import RiotDriver
def driver_from_config(dev_type='serial', *args, **kwargs):
    """Returns driver instance given configuration.

    'serial' and 'riot' construct the matching driver; 'driver' returns the
    pre-built instance supplied via the ``driver`` keyword argument.
    """
    if dev_type == 'driver':
        return kwargs['driver']
    if dev_type == 'serial':
        return SerialDriver(*args, **kwargs)
    if dev_type == 'riot':
        return RiotDriver(*args, **kwargs)
    raise NotImplementedError()
def available_configs(dev_type='serial', *args, **kwargs):
    """Returns possible configurations to attempt to connect to."""
    if dev_type == 'serial':
        driver_cls = SerialDriver
    elif dev_type == 'riot':
        driver_cls = RiotDriver
    else:
        raise NotImplementedError()
    return driver_cls.get_configs(*args, **kwargs)
def main():
    """Smoke-test the module: log the available configs and a default driver."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    logging.debug(available_configs())
    logging.debug(driver_from_config())


if __name__ == "__main__":
    main()
| lgpl-2.1 |
darren-rogan/CouchPotatoServer | libs/pyasn1/type/tagmap.py | 20 | 1766 | from pyasn1 import error
class TagMap:
    """Maps tag sets to ASN.1 types, with negative entries and a default.

    Lookup order: positive map, then negative map (an error), then the
    configured default type.
    """

    def __init__(self, posMap={}, negMap={}, defType=None):
        self.__posMap = dict(posMap)
        self.__negMap = dict(negMap)
        self.__defType = defType

    def __contains__(self, tagSet):
        if tagSet in self.__posMap:
            return True
        # Fall through to the default type unless explicitly negated.
        return self.__defType is not None and tagSet not in self.__negMap

    def __getitem__(self, tagSet):
        try:
            return self.__posMap[tagSet]
        except KeyError:
            pass
        if tagSet in self.__negMap:
            raise error.PyAsn1Error('Key in negative map')
        if self.__defType is None:
            raise KeyError()
        return self.__defType

    def __repr__(self):
        parts = [repr(self.__posMap), repr(self.__negMap)]
        if self.__defType is not None:
            parts.append(repr(self.__defType))
        return '/'.join(parts)

    def clone(self, parentType, tagMap, uniq=False):
        """Merge *tagMap* into a copy of this map, mapping its positive keys
        to *parentType*; *uniq* forbids duplicate positive keys."""
        otherDef = tagMap.getDef()
        if self.__defType is not None and otherDef is not None:
            raise error.PyAsn1Error('Duplicate default value at %s' % self)
        defType = otherDef if otherDef is not None else self.__defType
        posMap = dict(self.__posMap)
        for tagSet in tagMap.getPosMap():
            if uniq and tagSet in posMap:
                raise error.PyAsn1Error('Duplicate positive key %s' % tagSet)
            posMap[tagSet] = parentType
        negMap = dict(self.__negMap)
        negMap.update(tagMap.getNegMap())
        return self.__class__(posMap, negMap, defType)

    def getPosMap(self):
        return dict(self.__posMap)

    def getNegMap(self):
        return dict(self.__negMap)

    def getDef(self):
        return self.__defType
| gpl-3.0 |
Eficent/sale-workflow | sale_order_back2draft/models/sale_order.py | 30 | 1044 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api, exceptions, _
class SaleOrder(models.Model):
    _inherit = "sale.order"

    @api.multi
    def button_draft(self):
        """Reset cancelled sale orders (and their lines) back to draft.

        Raises exceptions.Warning if any order in the recordset is not in the
        'cancel' state.  Procurements generated for the order are removed and
        the order workflow is recreated from scratch.  Returns True.
        """
        # go from canceled state to draft state
        for order in self:
            if order.state != 'cancel':
                # Fix: translate the message template first, then interpolate.
                # The original interpolated before calling _(), so the msgid
                # sent to gettext contained the order name and never matched
                # a translation.
                raise exceptions.Warning(
                    _("You can't back any order that it's not on cancel "
                      "state. Order: %s") % order.name)
            order.order_line.write({'state': 'draft'})
            # Drop procurements created on confirmation so a new confirmation
            # can regenerate them.
            order.procurement_group_id.sudo().unlink()
            for line in order.order_line:
                line.procurement_ids.sudo().unlink()
            order.write({'state': 'draft'})
            # Rebuild the workflow so the order restarts from its first step.
            order.delete_workflow()
            order.create_workflow()
        return True
| agpl-3.0 |
Juppit/Arduino | tests/device/test_WiFiServer/test_WiFiServer.py | 11 | 1273 | from mock_decorators import setup, teardown
from threading import Thread
import socket
import time
# Module-level state shared between the setup and teardown hooks below.
stop_client_thread = False  # set True to ask the client thread to exit early
client_thread = None  # created in setup_echo_server, joined in teardown
@setup('Simple echo server')
def setup_echo_server(e):
global stop_client_thread
global client_thread
def echo_client_thread():
server_address = socket.gethostbyname('esp8266-wfs-test.local')
count = 0
while count < 5 and not stop_client_thread:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_address, 5000))
sock.settimeout(1.0)
buf = 'a' * 1023 + '\n'
sock.sendall(buf)
data = ''
retries = 0
while len(data) < 1024 and retries < 3:
data += sock.recv(1024)
retries += 1
print 'Received {} bytes'.format(len(data))
if len(data) != 1024:
raise RuntimeError('client failed to receive response')
count += 1
stop_client_thread = False
client_thread = Thread(target=echo_client_thread)
client_thread.start()
@teardown('Simple echo server')
def teardown_echo_server(e):
    # Signal the client thread to stop early, then wait for it to finish.
    global stop_client_thread
    stop_client_thread = True
    client_thread.join()
| lgpl-2.1 |
horance-liu/tensorflow | tensorflow/examples/speech_commands/label_wav_test.py | 53 | 2298 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for WAVE file labeling tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.examples.speech_commands import label_wav
from tensorflow.python.platform import test
class LabelWavTest(test.TestCase):
    """Exercises label_wav.label_wav end to end with synthetic inputs."""

    def _getWavData(self):
        # Encode a 1000-sample, 2-channel zero buffer as 16 kHz WAV bytes.
        with self.test_session() as sess:
            sample_data = tf.zeros([1000, 2])
            wav_encoder = contrib_audio.encode_wav(sample_data, 16000)
            wav_data = sess.run(wav_encoder)
        return wav_data

    def _saveTestWavFile(self, filename, wav_data):
        # Write the raw WAV bytes to disk for label_wav to read back.
        with open(filename, "wb") as f:
            f.write(wav_data)

    def testLabelWav(self):
        """Smoke test: build a WAV, a trivial graph, and a labels file, then
        run the labeling tool over them."""
        tmp_dir = self.get_temp_dir()
        wav_data = self._getWavData()
        wav_filename = os.path.join(tmp_dir, "wav_file.wav")
        self._saveTestWavFile(wav_filename, wav_data)
        input_name = "test_input"
        output_name = "test_output"
        graph_filename = os.path.join(tmp_dir, "test_graph.pb")
        # Serialize a minimal graph: a string placeholder input and a
        # constant [1, 3] zero tensor as the "scores" output.
        with tf.Session() as sess:
            tf.placeholder(tf.string, name=input_name)
            tf.zeros([1, 3], name=output_name)
            with open(graph_filename, "wb") as f:
                f.write(sess.graph.as_graph_def().SerializeToString())
        labels_filename = os.path.join(tmp_dir, "test_labels.txt")
        with open(labels_filename, "w") as f:
            f.write("a\nb\nc\n")
        label_wav.label_wav(wav_filename, labels_filename, graph_filename,
                            input_name + ":0", output_name + ":0", 3)
# Allow running this test file directly.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
Mslef/ud858 | ConferenceCentral_Complete/utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
    """Return a unique identifier for *user*.

    id_type selects the strategy:
      * "email"  - the user's email address (default).
      * "oauth"  - resolve the id via Google's OAuth2 tokeninfo endpoint.
      * "custom" - look up an existing Profile, else generate a uuid hex.
    """
    if id_type == "email":
        return user.email()

    if id_type == "oauth":
        # A workaround implementation for getting userid.
        auth = os.getenv('HTTP_AUTHORIZATION')
        bearer, token = auth.split()
        token_type = 'id_token'
        if 'OAUTH_USER_ID' in os.environ:
            token_type = 'access_token'
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
               % (token_type, token))
        user = {}
        wait = 1
        for i in range(3):
            resp = urlfetch.fetch(url)
            if resp.status_code == 200:
                user = json.loads(resp.content)
                break
            elif resp.status_code == 400 and 'invalid_token' in resp.content:
                # The id_token was rejected; retry immediately as an
                # access_token instead.
                url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
                       % ('access_token', token))
            else:
                # Transient failure: back off before retrying.
                time.sleep(wait)
                wait = wait + i
        return user.get('user_id', '')

    if id_type == "custom":
        # Implement your own user_id creation and getting algorithm.
        # This is just a sample that queries datastore for an existing profile
        # and generates an id if a profile does not exist for an email.
        # Fix: the original referenced the undefined name ``Conference`` (only
        # Profile is imported, so this branch always raised NameError) and
        # called ``.id()`` on a Query object; query Profile and fetch a single
        # entity instead.
        # NOTE(review): assumes Profile defines a ``mainEmail`` property --
        # confirm against the models module.
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return profile.key.id()
        else:
            return str(uuid.uuid1().get_hex())
| gpl-3.0 |
samliu/servo | components/script/dom/bindings/codegen/parser/tests/test_promise.py | 74 | 1621 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface _Promise {};
interface A {
legacycaller Promise<any> foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow Promise return values for legacycaller.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface _Promise {};
interface A {
Promise<any> foo();
long foo(long arg);
};
""")
results = parser.finish();
except:
threw = True
harness.ok(threw,
"Should not allow overloads which have both Promise and "
"non-Promise return types.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface _Promise {};
interface A {
long foo(long arg);
Promise<any> foo();
};
""")
results = parser.finish();
except:
threw = True
harness.ok(threw,
"Should not allow overloads which have both Promise and "
"non-Promise return types.")
parser = parser.reset()
parser.parse("""
interface _Promise {};
interface A {
Promise<any> foo();
Promise<any> foo(long arg);
};
""")
results = parser.finish();
harness.ok(True,
"Should allow overloads which only have Promise and return "
"types.")
| mpl-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/db/models/sql/subqueries.py | 103 | 9990 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
from django.utils.datastructures import SortedDict
from django.utils.functional import Promise
from django.utils.encoding import force_text
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        # Point the query at a single table and where clause, then execute.
        # DELETE produces no result rows, hence execute_sql(None).
        self.tables = [table]
        self.where = where
        self.get_compiler(using).execute_sql(None)

    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        if not field:
            field = self.model._meta.pk
        # Chunk the pk list so each DELETE stays under backend parameter
        # limits (GET_ITERATOR_CHUNK_SIZE pks per statement).
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            where = self.where_class()
            where.add((Constraint(None, field.column, field), 'in',
                       pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND)
            self.do_query(self.model._meta.db_table, where, using=using)

    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = [t for t in innerq.tables
                              if innerq.alias_refcount[t]]
        if ((not innerq_used_tables or innerq_used_tables == self.tables)
            and not len(innerq.having)):
            # There is only the base table in use in the query, and there are
            # no aggregate filtering going on.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return
                self.delete_batch(values, using)
                return
            else:
                # Reduce the inner query to selecting only the pk column so
                # it can serve as the subquery of "pk IN (...)".
                innerq.clear_select_clause()
                innerq.select, innerq.select_fields = [(self.get_initial_alias(), pk.column)], [None]
                values = innerq
            where = self.where_class()
            where.add((Constraint(None, pk.column, pk), 'in', values), AND)
            self.where = where
        self.get_compiler(using).execute_sql(None)
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        # Copy related_updates so clones do not share the mutable dict.
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def update_batch(self, pk_list, values, using):
        # Apply `values` to the rows in pk_list, chunked to stay under
        # backend parameter limits.
        pk_field = self.model._meta.pk
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
                            pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]),
                           AND)
            self.get_compiler(using).execute_sql(None)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in six.iteritems(values):
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            if model:
                # Field lives on an ancestor model: route it to a separate
                # per-ancestor update (see add_related_update).
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        # Check that no Promise object passes to the query. Refs #10498.
        values_seq = [(value[0], value[1], force_text(value[2]))
                      if isinstance(value[2], Promise) else value
                      for value in values_seq]
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        try:
            self.related_updates[model].append((field, None, value))
        except KeyError:
            self.related_updates[model] = [(field, None, value)]

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in six.iteritems(self.related_updates):
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """Represents an "insert" SQL query for one or more objects."""
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        self.fields = []
        self.objs = []

    def clone(self, klass=None, **kwargs):
        # Shallow-copy the field/object lists so clones are independent.
        extras = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        extras.update(kwargs)
        return super(InsertQuery, self).clone(klass, **extras)

    def insert_values(self, fields, objs, raw=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.

        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        self.fields = fields
        # Check that no Promise object reaches the DB. Refs #10498.
        for field in fields:
            for obj in objs:
                value = getattr(obj, field.attname)
                if isinstance(value, Promise):
                    setattr(obj, field.attname, force_text(value))
        self.objs = objs
        self.raw = raw
class DateQuery(Query):
    """
    A DateQuery is a normal query, except that it specifically selects a single
    date field. This requires some special handling when converting the results
    back to Python objects, so we put it in a separate class.
    """
    compiler = 'SQLDateCompiler'

    def add_date_select(self, field_name, lookup_type, order='ASC'):
        """
        Converts the query into a date extraction query.
        """
        try:
            result = self.setup_joins(
                field_name.split(LOOKUP_SEP),
                self.get_meta(),
                self.get_initial_alias(),
                False
            )
        except FieldError:
            # Surface a missing/unjoinable field as FieldDoesNotExist so
            # callers get the historical error type.
            raise FieldDoesNotExist("%s has no field named '%s'" % (
                self.model._meta.object_name, field_name
            ))
        field = result[0]
        assert isinstance(field, DateField), "%r isn't a DateField." \
            % field.name
        alias = result[3][-1]
        select = Date((alias, field.column), lookup_type)
        # Replace the select list with the single truncated-date expression
        # and de-duplicate/order the resulting dates.
        self.clear_select_clause()
        self.select, self.select_fields = [select], [None]
        self.distinct = True
        self.order_by = order == 'ASC' and [1] or [-1]
        if field.null:
            # NULL dates would produce None entries; filter them out.
            self.add_filter(("%s__isnull" % field_name, False))
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        """Compile `query` for `using` and store its SQL/params as the subquery."""
        compiled_sql, params = query.get_compiler(using).as_sql(with_col_aliases=True)
        self.subquery = compiled_sql
        self.sub_params = params
| mit |
mancoast/CPythonPyc_test | fail/312_test_SimpleHTTPServer.py | 7 | 1334 | """
These tests only check url parsing for now.
We don't want to require the 'network' resource.
"""
import os, unittest
from http.server import SimpleHTTPRequestHandler
from test import support
class SocketlessRequestHandler(SimpleHTTPRequestHandler):
    # Bypass BaseRequestHandler.__init__ so the handler can be instantiated
    # without a connected socket; only translate_path() is exercised here.
    def __init__(self):
        pass
class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
""" Test url parsing """
def setUp (self):
self.translated = os.getcwd()
self.translated = os.path.join(self.translated, 'filename')
self.handler = SocketlessRequestHandler ()
def test_queryArguments (self):
path = self.handler.translate_path ('/filename')
self.assertEquals (path, self.translated)
path = self.handler.translate_path ('/filename?foo=bar')
self.assertEquals (path, self.translated)
path = self.handler.translate_path ('/filename?a=b&spam=eggs#zot')
self.assertEquals (path, self.translated)
def test_startWithDoubleSlash (self):
path = self.handler.translate_path ('//filename')
self.assertEquals (path, self.translated)
path = self.handler.translate_path ('//filename?foo=bar')
self.assertEquals (path, self.translated)
def test_main():
    # Run through test.support so the regression framework's reporting and
    # resource handling apply.
    support.run_unittest(SimpleHTTPRequestHandlerTestCase)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
Tokyo-Buffalo/tokyosouth | env/lib/python3.6/site-packages/wheel/paths.py | 565 | 1130 | """
Installation paths.
Map the .data/ subdirectory names to install paths.
"""
import os.path
import sys
import distutils.dist as dist
import distutils.command.install as install
def get_install_command(name):
    """Return a finalized distutils ``install`` command for a dist *name*."""
    # Build the Distribution/command pair here rather than at import time so
    # any monkeypatching of distutils classes is still honored (late binding).
    distribution = dist.Distribution({'name': name})
    command = install.install(distribution)
    command.finalize_options()
    return command
def get_install_paths(name):
    """
    Return the (distutils) install paths for the named dist.

    A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys.
    """
    paths = {}

    i = get_install_command(name)

    for key in install.SCHEME_KEYS:
        paths[key] = getattr(i, 'install_' + key)

    # pip uses a similar path as an alternative to the system's (read-only)
    # include directory:
    if hasattr(sys, 'real_prefix'):  # virtualenv
        # Fix: build 'pythonX.Y' from sys.version_info. Slicing sys.version
        # breaks on two-digit minor versions (e.g. '3.10.1'[:3] == '3.1').
        paths['headers'] = os.path.join(sys.prefix,
                                        'include',
                                        'site',
                                        'python%d.%d' % sys.version_info[:2],
                                        name)
    return paths
| mit |
MiltosD/CEF-ELRC | lib/python2.7/site-packages/django/utils/numberformat.py | 290 | 1632 | from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep=''):
    """
    Get a number (as a number or string) and return it as a string, using
    the formats given as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    use_grouping = (settings.USE_L10N and
                    settings.USE_THOUSAND_SEPARATOR and grouping)
    # Fast path: a plain int with no grouping and no decimal padding can be
    # rendered directly.
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(unicode(number))
    # Work out the sign, then continue with the unsigned digit string.
    sign = '-' if float(number) < 0 else ''
    str_number = unicode(number)
    if str_number.startswith('-'):
        str_number = str_number[1:]
    # Split off the decimal part, trimming it to decimal_pos digits.
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos:
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    # Zero-pad the decimal part out to decimal_pos digits.
    if decimal_pos:
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part:
        dec_part = decimal_sep + dec_part
    # Insert the thousand separator every `grouping` digits, right to left.
    if use_grouping:
        grouped = []
        for cnt, digit in enumerate(int_part[::-1]):
            if cnt and not cnt % grouping:
                grouped.append(thousand_sep)
            grouped.append(digit)
        int_part = ''.join(grouped)[::-1]
    return sign + int_part + dec_part
| bsd-3-clause |
diox/app-validator | appvalidator/errorbundle/metadatamixin.py | 5 | 2010 | from collections import defaultdict
class MetadataMixin(object):
    """
    Mixin that adds metadata collection to the standard error bundle.

    Mixing this into the error bundle lets the app collect and process
    metadata during the validation process.
    """

    def __init__(self, *args, **kwargs):
        self.resources = {}
        self.pushable_resources = {}
        self.final_context = None
        self.metadata = {}
        self.feature_profile = set()
        self.feature_usage = defaultdict(list)
        super(MetadataMixin, self).__init__(*args, **kwargs)

    def get_resource(self, name):
        """Retrieve an object that has been stored by another test."""
        # Plain resources take precedence over pushable ones.
        for pool in (self.resources, self.pushable_resources):
            if name in pool:
                return pool[name]
        return False

    def get_or_create(self, name, default, pushable=False):
        """Retrieve an object stored by another test, creating it as
        `default` when it does not exist yet.
        """
        for pool in (self.resources, self.pushable_resources):
            if name in pool:
                return pool[name]
        return self.save_resource(name, default, pushable=pushable)

    def save_resource(self, name, resource, pushable=False):
        """Save an object such that it can be used by other tests."""
        pool = self.pushable_resources if pushable else self.resources
        pool[name] = resource
        return resource

    def _extend_json(self):
        """Fold the collected metadata into the main JSON blob."""
        extension = super(MetadataMixin, self)._extend_json() or {}
        extension.update(metadata=self.metadata,
                         feature_profile=list(self.feature_profile),
                         feature_usage=dict(self.feature_usage))
        return extension
| bsd-3-clause |
linjoahow/lego_Automatic-assembly | static/Brython3.1.0-20150301-090019/Lib/datetime.py | 628 | 75044 | """Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()

# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.

# Days per month; index 0 is a placeholder so months are 1-based.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# Cumulative days before each month in a non-leap year, also 1-based.
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
    "year, month -> number of days in that month in that year."
    assert 1 <= month <= 12, month
    # Only February needs the leap-year adjustment.
    days = _DAYS_IN_MONTH[month]
    if month == 2 and _is_leap(year):
        days = 29
    return days
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    # Months after February absorb the leap day, if any.
    leap_adjust = month > 2 and _is_leap(year)
    return _DAYS_BEFORE_MONTH[month] + leap_adjust
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Whole years, then whole months within the year, then the day itself.
    ordinal = _days_before_year(year) + _days_before_month(year, month)
    return ordinal + day
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    # number of days in 100 years
_DI4Y = _days_before_year(5)        # number of days in   4 years

# Import-time sanity checks on the cycle lengths used by _ord2ymd.

# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1

# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1

# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."

    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399        -_DI400Y +1    -_DI400Y        400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0               400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y         400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...

    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)

    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)

    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)

    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # Landed exactly on the trailing Dec 31 of a 4- or 400-year cycle.
        assert n == 0
        return year-1, 12, 31

    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:  # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)

    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
# Month and day names.  For localized versions, see the calendar module.
# Index 0 is a placeholder so month and ISO-weekday numbers are 1-based.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    """Assemble a time.struct_time, deriving the weekday and day-of-year."""
    # Ordinal 1 (1-Jan-1) was a Monday; the +6 shift makes Monday == 0.
    weekday = (_ymd2ord(y, m, d) + 6) % 7
    yearday = _days_before_month(y, m) + d
    fields = (y, m, d, hh, mm, ss, weekday, yearday, dstflag)
    return _time.struct_time(fields)
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
    """Pre-expand %f/%z/%Z in `format`, then delegate to time.strftime."""
    # Don't call utcoffset() or tzname() unless actually needed.
    freplace = None  # the string to use for %f
    zreplace = None  # the string to use for %z
    Zreplace = None  # the string to use for %Z

    # Scan format for %z and %Z escapes, replacing as needed.
    newformat = []
    push = newformat.append
    i, n = 0, len(format)
    while i < n:
        ch = format[i]
        i += 1
        if ch == '%':
            if i < n:
                ch = format[i]
                i += 1
                if ch == 'f':
                    # Microseconds, zero-padded to six digits; objects
                    # without a microsecond attribute format as 000000.
                    if freplace is None:
                        freplace = '%06d' % getattr(object,
                                                    'microsecond', 0)
                    newformat.append(freplace)
                elif ch == 'z':
                    if zreplace is None:
                        zreplace = ""
                        if hasattr(object, "utcoffset"):
                            offset = object.utcoffset()
                            if offset is not None:
                                # Render the offset as [+-]HHMM.
                                sign = '+'
                                if offset.days < 0:
                                    offset = -offset
                                    sign = '-'
                                h, m = divmod(offset, timedelta(hours=1))
                                assert not m % timedelta(minutes=1), "whole minute"
                                m //= timedelta(minutes=1)
                                zreplace = '%c%02d%02d' % (sign, h, m)
                    assert '%' not in zreplace
                    newformat.append(zreplace)
                elif ch == 'Z':
                    if Zreplace is None:
                        Zreplace = ""
                        if hasattr(object, "tzname"):
                            s = object.tzname()
                            if s is not None:
                                # strftime is going to have at this: escape %
                                Zreplace = s.replace('%', '%%')
                    newformat.append(Zreplace)
                else:
                    # Any other escape is passed through untouched.
                    push('%')
                    push(ch)
            else:
                # Trailing lone '%' at end of format.
                push('%')
        else:
            push(ch)
    newformat = "".join(newformat)
    return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned.  Else ValueError is raised.
def _check_utc_offset(name, offset):
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    # Sub-minute offsets are rejected: no fractional minutes, no leftover
    # microseconds.
    if offset % timedelta(minutes=1) or offset.microseconds:
        raise ValueError("tzinfo.%s() must return a whole number "
                         "of minutes, got %s" % (name, offset))
    # Offset must lie strictly within one day of UTC.
    # NOTE(review): the message below duplicates "must be" -- preserved
    # verbatim here since it is runtime output.
    if not -timedelta(1) < offset < timedelta(1):
        raise ValueError("%s()=%s, must be must be strictly between"
                         " -timedelta(hours=24) and timedelta(hours=24)"
                         % (name, offset))
def _check_date_fields(year, month, day):
    """Validate date components, raising TypeError/ValueError on bad input."""
    # Only the year is explicitly type-checked; month/day are range-checked.
    if not isinstance(year, int):
        raise TypeError('int expected')
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    # The day bound depends on the month (and the year, for February).
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        raise ValueError('day must be in 1..%d' % dim, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
    """Reject tz values that are neither None nor a tzinfo instance."""
    if tz is None:
        return
    if not isinstance(tz, tzinfo):
        raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds).  Why?  Because I
    felt like it.
    """
    # Normalized state: 0 <= _seconds < 24*3600, 0 <= _microseconds < 10**6;
    # any negativity is absorbed into _days.
    __slots__ = '_days', '_seconds', '_microseconds'

    def __new__(cls, days=0, seconds=0, microseconds=0,
                milliseconds=0, minutes=0, hours=0, weeks=0):
        # Doing this efficiently and accurately in C is going to be difficult
        # and error-prone, due to ubiquitous overflow possibilities, and that
        # C double doesn't have enough bits of precision to represent
        # microseconds over 10K years faithfully.  The code here tries to make
        # explicit where go-fast assumptions can be relied on, in order to
        # guide the C implementation; it's way more convoluted than speed-
        # ignoring auto-overflow-to-long idiomatic Python could be.

        # XXX Check that all inputs are ints or floats.

        # Final values, all integer.
        # s and us fit in 32-bit signed ints; d isn't bounded.
        d = s = us = 0

        # Normalize everything to days, seconds, microseconds.
        days += weeks*7
        seconds += minutes*60 + hours*3600
        microseconds += milliseconds*1000

        # Get rid of all fractions, and normalize s and us.
        # Take a deep breath <wink>.
        if isinstance(days, float):
            dayfrac, days = _math.modf(days)
            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
            assert daysecondswhole == int(daysecondswhole)  # can't overflow
            s = int(daysecondswhole)
            assert days == int(days)
            d = int(days)
        else:
            daysecondsfrac = 0.0
            d = days
        assert isinstance(daysecondsfrac, float)
        assert abs(daysecondsfrac) <= 1.0
        assert isinstance(d, int)
        assert abs(s) <= 24 * 3600
        # days isn't referenced again before redefinition

        if isinstance(seconds, float):
            secondsfrac, seconds = _math.modf(seconds)
            assert seconds == int(seconds)
            seconds = int(seconds)
            secondsfrac += daysecondsfrac
            assert abs(secondsfrac) <= 2.0
        else:
            secondsfrac = daysecondsfrac
        # daysecondsfrac isn't referenced again
        assert isinstance(secondsfrac, float)
        assert abs(secondsfrac) <= 2.0

        assert isinstance(seconds, int)
        days, seconds = divmod(seconds, 24*3600)
        d += days
        s += int(seconds)    # can't overflow
        assert isinstance(s, int)
        assert abs(s) <= 2 * 24 * 3600
        # seconds isn't referenced again before redefinition

        usdouble = secondsfrac * 1e6
        assert abs(usdouble) < 2.1e6    # exact value not critical
        # secondsfrac isn't referenced again

        if isinstance(microseconds, float):
            # Fold the accumulated fractional seconds into microseconds,
            # then round to a whole count before normalizing.
            microseconds += usdouble
            microseconds = round(microseconds, 0)
            seconds, microseconds = divmod(microseconds, 1e6)
            assert microseconds == int(microseconds)
            assert seconds == int(seconds)
            days, seconds = divmod(seconds, 24.*3600.)
            assert days == int(days)
            assert seconds == int(seconds)
            d += int(days)
            s += int(seconds)   # can't overflow
            assert isinstance(s, int)
            assert abs(s) <= 3 * 24 * 3600
        else:
            seconds, microseconds = divmod(microseconds, 1000000)
            days, seconds = divmod(seconds, 24*3600)
            d += days
            s += int(seconds)    # can't overflow
            assert isinstance(s, int)
            assert abs(s) <= 3 * 24 * 3600
            microseconds = float(microseconds)
            microseconds += usdouble
            microseconds = round(microseconds, 0)
        assert abs(s) <= 3 * 24 * 3600
        assert abs(microseconds) < 3.1e6

        # Just a little bit of carrying possible for microseconds and seconds.
        assert isinstance(microseconds, float)
        assert int(microseconds) == microseconds
        us = int(microseconds)
        seconds, us = divmod(us, 1000000)
        s += seconds    # can't overflow
        assert isinstance(s, int)
        days, s = divmod(s, 24*3600)
        d += days

        assert isinstance(d, int)
        assert isinstance(s, int) and 0 <= s < 24*3600
        assert isinstance(us, int) and 0 <= us < 1000000

        self = object.__new__(cls)

        self._days = d
        self._seconds = s
        self._microseconds = us
        if abs(d) > 999999999:
            raise OverflowError("timedelta # of days is too large: %d" % d)

        return self

    def __repr__(self):
        # Render only the trailing fields that are nonzero.
        if self._microseconds:
            return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
                                       self._days,
                                       self._seconds,
                                       self._microseconds)
        if self._seconds:
            return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
                                   self._days,
                                   self._seconds)
        return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)

    def __str__(self):
        # Format as "[D day[s], ]H:MM:SS[.ffffff]".
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        s = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            def plural(n):
                return n, abs(n) != 1 and "s" or ""
            s = ("%d day%s, " % plural(self._days)) + s
        if self._microseconds:
            s = s + ".%06d" % self._microseconds
        return s

    def total_seconds(self):
        """Total seconds in the duration."""
        return ((self.days * 86400 + self.seconds)*10**6 +
                self.microseconds) / 10**6

    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days

    @property
    def seconds(self):
        """seconds"""
        return self._seconds

    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds

    def __add__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days + other._days,
                             self._seconds + other._seconds,
                             self._microseconds + other._microseconds)
        return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days - other._days,
                             self._seconds - other._seconds,
                             self._microseconds - other._microseconds)
        return NotImplemented

    def __rsub__(self, other):
        if isinstance(other, timedelta):
            return -self + other
        return NotImplemented

    def __neg__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta(-self._days,
                         -self._seconds,
                         -self._microseconds)

    def __pos__(self):
        return self

    def __abs__(self):
        # The sign of a normalized timedelta lives entirely in _days.
        if self._days < 0:
            return -self
        else:
            return self

    def __mul__(self, other):
        if isinstance(other, int):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta(self._days * other,
                             self._seconds * other,
                             self._microseconds * other)
        if isinstance(other, float):
            # Multiply exactly via the float's integer ratio.
            a, b = other.as_integer_ratio()
            return self * a / b
        return NotImplemented

    __rmul__ = __mul__

    def _to_microseconds(self):
        # Collapse the normalized triple into one integer microsecond count;
        # exact because Python ints are unbounded.
        return ((self._days * (24*3600) + self._seconds) * 1000000 +
                self._microseconds)

    def __floordiv__(self, other):
        if not isinstance(other, (int, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec // other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, usec // other)

    def __truediv__(self, other):
        if not isinstance(other, (int, float, timedelta)):
            return NotImplemented
        usec = self._to_microseconds()
        if isinstance(other, timedelta):
            return usec / other._to_microseconds()
        if isinstance(other, int):
            return timedelta(0, 0, usec / other)
        if isinstance(other, float):
            a, b = other.as_integer_ratio()
            return timedelta(0, 0, b * usec / a)

    def __mod__(self, other):
        if isinstance(other, timedelta):
            r = self._to_microseconds() % other._to_microseconds()
            return timedelta(0, 0, r)
        return NotImplemented

    def __divmod__(self, other):
        if isinstance(other, timedelta):
            q, r = divmod(self._to_microseconds(),
                          other._to_microseconds())
            return q, timedelta(0, 0, r)
        return NotImplemented

    # Comparisons of timedelta objects with other.  Equality against
    # non-timedelta values is False/True rather than an error; ordering
    # against them raises TypeError via _cmperror.

    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other):
        # Lexicographic compare of the normalized state tuples.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())

    def __hash__(self):
        return hash(self._getstate())

    def __bool__(self):
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)

    # Pickle support.

    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)

    def __reduce__(self):
        return (self.__class__, self._getstate())
# Bounds and resolution of the type, attached after the class definition
# because the values are themselves timedelta instances.
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
                          microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
    """Concrete date type.

    Constructors:

    __new__()
    fromtimestamp()
    today()
    fromordinal()

    Operators:

    __repr__, __str__
    __cmp__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)

    Methods:

    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()

    Properties (readonly):
    year, month, day
    """
    __slots__ = '_year', '_month', '_day'

    def __new__(cls, year, month=None, day=None):
        """Constructor.

        Arguments:

        year, month, day (required, base 1)
        """
        # A 4-byte `year` with a sane month byte is the state produced by
        # _getstate(); unpickling re-enters through the constructor.
        if (isinstance(year, bytes) and len(year) == 4 and
                1 <= year[2] <= 12 and month is None):  # Month is sane
            # Pickle support
            self = object.__new__(cls)
            self.__setstate(year)
            return self
        _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        return self

    # Additional constructors

    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)

    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)

    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.

        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)

    # Conversions to string

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'

        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)

    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.

    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)

    def strftime(self, fmt):
        "Format using strftime()."
        return _wrap_strftime(self, fmt, self.timetuple())

    def __format__(self, fmt):
        # format(d, "") falls back to str(); any other spec is strftime.
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    def isoformat(self):
        """Return the date formatted according to ISO.

        This is 'YYYY-MM-DD'.

        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        return "%04d-%02d-%02d" % (self._year, self._month, self._day)

    __str__ = isoformat

    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year

    @property
    def month(self):
        """month (1-12)"""
        return self._month

    @property
    def day(self):
        """day (1-31)"""
        return self._day

    # Standard conversions, __cmp__, __hash__ (and helpers)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)

    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.

        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)

    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        _check_date_fields(year, month, day)
        return date(year, month, day)

    # Comparisons of date objects with other.
    # NOTE: these return NotImplemented for non-date operands so Python
    # can try the reflected operation (unlike timedelta/time above).

    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, date):
            return self._cmp(other) != 0
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        return NotImplemented

    def _cmp(self, other):
        # Lexicographic comparison of (year, month, day) tuples.
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))

    def __hash__(self):
        "Hash."
        return hash(self._getstate())

    # Computations

    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            # Only the timedelta's day component matters for dates.
            o = self.toordinal() + other.days
            if 0 < o <= _MAXORDINAL:
                return date.fromordinal(o)
            raise OverflowError("result out of range")
        return NotImplemented

    __radd__ = __add__

    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, timedelta):
            return self + timedelta(-other.days)
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta(days1 - days2)
        return NotImplemented

    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        return (self.toordinal() + 6) % 7

    # Day-of-the-week and week-of-the-year, according to ISO

    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7

    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.

        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.

        The first week is 1; Monday is 1 ... Sunday is 7.

        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Date falls before this year's week 1: it belongs to the
            # last week of the previous ISO year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Possibly already in week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1

    # Pickle support.

    def _getstate(self):
        # 4 bytes: year as big-endian 16-bit value, then month and day.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),

    def __setstate(self, string):
        if len(string) != 4 or not (1 <= string[2] <= 12):
            raise TypeError("not enough arguments")
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo

    def __reduce__(self):
        return (self.__class__, self._getstate())
_date_class = date  # so functions w/ args named "date" can get at the class

# Extreme representable dates and the smallest resolvable difference.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
    """Abstract base class for time zone info classes.

    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()

    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")

    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")

    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.

        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")

    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # delta is the standard (non-DST) offset; shift by it first, then
        # re-evaluate dst() on the shifted time to decide the final result.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst

    # Pickle support.

    def __reduce__(self):
        # Respect optional __getinitargs__/__getstate__ hooks so
        # subclasses with constructor arguments or extra state pickle
        # correctly despite the empty __slots__ here.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)

_tzinfo_class = tzinfo
class time:
    """Time with time zone.

    Constructors:

    __new__()

    Operators:

    __repr__, __str__
    __cmp__, __hash__

    Methods:

    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()

    Properties (readonly):
    hour, minute, second, microsecond, tzinfo
    """

    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.

        Arguments:

        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        self = object.__new__(cls)
        # A 6-byte `hour` is the packed state from _getstate(); `minute`
        # then carries the (optional) tzinfo.
        if isinstance(hour, bytes) and len(hour) == 6:
            # Pickle support
            self.__setstate(hour, minute or None)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    # Standard conversions, __hash__ (and helpers)

    # Comparisons of time objects with other.
    # ==/!= tolerate mixed naive/aware operands (allow_mixed=True makes
    # them simply unequal); ordering comparisons raise TypeError instead.

    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other, allow_mixed=False):
        """Three-way comparison helper; -1/0/+1 (or 2 for mixed naive/aware).

        Times sharing the same tzinfo object (or with equal UTC offsets)
        compare field-by-field; otherwise both are shifted to UTC minutes
        first.
        """
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2  # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))

    def __hash__(self):
        """Hash."""
        # Normalize to UTC first so equal aware times hash equally.
        tzoff = self.utcoffset()
        if not tzoff:  # zero or None
            return hash(self._getstate()[0])
        h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                      timedelta(hours=1))
        assert not m % timedelta(minutes=1), "whole minute"
        m //= timedelta(minutes=1)
        if 0 <= h < 24:
            return hash(time(h, m, self.second, self.microsecond))
        return hash((h, m, self.second, self.microsecond))

    # Conversion to string

    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None."""
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off

    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields are omitted, mirroring the constructor
        # defaults.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s = "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
                              self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def isoformat(self):
        """Return the time formatted according to ISO.

        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        tz = self._tzstr()
        if tz:
            s += tz
        return s

    __str__ = isoformat

    def strftime(self, fmt):
        """Format using strftime().  The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)

    def __format__(self, fmt):
        # format(t, "") falls back to str(); any other spec is strftime.
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    # Timezone functions

    def utcoffset(self):
        """Return the timezone offset east of UTC (negative west of
        UTC), or None if naive."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset

    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True):
        """Return a new time with new values for the specified fields."""
        # True is the sentinel for tzinfo because None is a meaningful
        # value (it strips the timezone).
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return time(hour, minute, second, microsecond, tzinfo)

    def __bool__(self):
        # A time is falsy when it equals midnight UTC after applying its
        # own offset (historical behavior; removed in later Pythons).
        if self.second or self.microsecond:
            return True
        offset = self.utcoffset() or timedelta(0)
        return timedelta(hours=self.hour, minutes=self.minute) != offset

    # Pickle support.

    def _getstate(self):
        # 6 bytes: hour, minute, second, then microsecond as 3 big-endian
        # bytes; tzinfo (if any) rides alongside as a separate item.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        if len(string) != 6 or string[0] >= 24:
            raise TypeError("an integer is required")
        (self._hour, self._minute, self._second,
         us1, us2, us3) = string
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        return (time, self._getstate())
_time_class = time  # so functions w/ args named "time" can get at the class

# Extreme representable times and the smallest resolvable difference.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])

    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints.
    """

    __slots__ = date.__slots__ + (
        '_hour', '_minute', '_second',
        '_microsecond', '_tzinfo')

    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None):
        # A 10-byte `year` is the packed state from _getstate(); `month`
        # then carries the (optional) tzinfo.
        if isinstance(year, bytes) and len(year) == 10:
            # Pickle support
            self = date.__new__(cls, year[:4])
            self.__setstate(year, month)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self = date.__new__(cls, year, month, day)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    @classmethod
    def fromtimestamp(cls, t, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).

        A timezone info object may be passed in as well.
        """
        _check_tzinfo_arg(tz)

        # With a tz, convert from UTC fields and let tz.fromutc() localize.
        converter = _time.localtime if tz is None else _time.gmtime

        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)

        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000. In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        result = cls(y, m, d, hh, mm, ss, us, tz)
        if tz is not None:
            result = tz.fromutc(result)
        return result

    @classmethod
    def utcfromtimestamp(cls, t):
        "Construct a UTC datetime from a POSIX timestamp (like time.time())."
        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)

        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000. In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        return cls(y, m, d, hh, mm, ss, us)

    # XXX This is supposed to do better than we *can* do by using time.time(),
    # XXX if the platform supports a more accurate way.  The C implementation
    # XXX uses gettimeofday on platforms that have it, but that isn't
    # XXX available from Python.  So now() may return different results
    # XXX across the implementations.
    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)

    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)

    @classmethod
    def combine(cls, date, time):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   time.tzinfo)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        dst = self.dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        else:
            dst = 0
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)

    def timestamp(self):
        "Return POSIX timestamp as float"
        if self._tzinfo is None:
            # Naive: interpret the fields as local time via mktime.
            return _time.mktime((self.year, self.month, self.day,
                                 self.hour, self.minute, self.second,
                                 -1, -1, -1)) + self.microsecond / 1e6
        else:
            return (self - _EPOCH).total_seconds()

    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        offset = self.utcoffset()
        if offset:
            # Rebinds the local name only; self the caller holds is
            # untouched.
            self -= offset
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        return _build_struct_time(y, m, d, hh, mm, ss, 0)

    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)

    def time(self):
        "Return the time part, with tzinfo None."
        return time(self.hour, self.minute, self.second, self.microsecond)

    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo)

    def replace(self, year=None, month=None, day=None, hour=None,
                minute=None, second=None, microsecond=None, tzinfo=True):
        """Return a new datetime with new values for the specified fields."""
        # True is the sentinel for tzinfo because None is a meaningful
        # value (it strips the timezone).
        if year is None:
            year = self.year
        if month is None:
            month = self.month
        if day is None:
            day = self.day
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_date_fields(year, month, day)
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return datetime(year, month, day, hour, minute, second,
                        microsecond, tzinfo)

    def astimezone(self, tz=None):
        # With tz omitted, infer the local timezone from the platform,
        # preferring struct_time's tm_gmtoff/tm_zone when available.
        if tz is None:
            if self.tzinfo is None:
                raise ValueError("astimezone() requires an aware datetime")
            ts = (self - _EPOCH) // timedelta(seconds=1)
            localtm = _time.localtime(ts)
            local = datetime(*localtm[:6])
            try:
                # Extract TZ data if available
                gmtoff = localtm.tm_gmtoff
                zone = localtm.tm_zone
            except AttributeError:
                # Compute UTC offset and compare with the value implied
                # by tm_isdst.  If the values match, use the zone name
                # implied by tm_isdst.
                delta = local - datetime(*_time.gmtime(ts)[:6])
                dst = _time.daylight and localtm.tm_isdst > 0
                gmtoff = -(_time.altzone if dst else _time.timezone)
                if delta == timedelta(seconds=gmtoff):
                    tz = timezone(delta, _time.tzname[dst])
                else:
                    tz = timezone(delta)
            else:
                tz = timezone(timedelta(seconds=gmtoff), zone)

        elif not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")

        mytz = self.tzinfo
        if mytz is None:
            raise ValueError("astimezone() requires an aware datetime")

        if tz is mytz:
            return self

        # Convert self to UTC, and attach the new time zone object.
        myoffset = self.utcoffset()
        if myoffset is None:
            raise ValueError("astimezone() requires an aware datetime")
        utc = (self - myoffset).replace(tzinfo=tz)

        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)

    # Ways to produce a string.

    def ctime(self):
        "Return ctime() style string."
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d %02d:%02d:%02d %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day,
            self._hour, self._minute, self._second,
            self._year)

    def isoformat(self, sep='T'):
        """Return the time formatted according to ISO.

        This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
        self.microsecond == 0.

        If self.tzinfo is not None, the UTC offset is also attached, giving
        'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.

        Optional argument sep specifies the separator between date and
        time, default 'T'.
        """
        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
                                   sep) +
             _format_time(self._hour, self._minute, self._second,
                          self._microsecond))
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            s += "%s%02d:%02d" % (sign, hh, mm)
        return s

    def __repr__(self):
        """Convert to formal string, for repr()."""
        L = [self._year, self._month, self._day,  # These are never zero
             self._hour, self._minute, self._second, self._microsecond]
        # Drop trailing zero microsecond/second fields, mirroring the
        # constructor defaults.
        if L[-1] == 0:
            del L[-1]
        if L[-1] == 0:
            del L[-1]
        s = ", ".join(map(str, L))
        s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def __str__(self):
        "Convert to string, for str()."
        return self.isoformat(sep=' ')

    @classmethod
    def strptime(cls, date_string, format):
        'string, format -> new datetime parsed from a string (like time.strptime()).'
        import _strptime
        return _strptime._strptime_datetime(cls, date_string, format)

    def utcoffset(self):
        """Return the timezone offset east of UTC (negative west of
        UTC), or None if naive."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        name = _call_tzinfo_method(self._tzinfo, "tzname", self)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        _check_utc_offset("dst", offset)
        return offset

    # Comparisons of datetime objects with other.
    # A plain date operand is never equal/ordered; anything else defers
    # via NotImplemented.

    def __eq__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other, allow_mixed=True) == 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other, allow_mixed=True) != 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            return True

    def __le__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) <= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) < 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) >= 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, datetime):
            return self._cmp(other) > 0
        elif not isinstance(other, date):
            return NotImplemented
        else:
            _cmperror(self, other)

    def _cmp(self, other, allow_mixed=False):
        """Three-way comparison helper; -1/0/+1 (or 2 for mixed naive/aware).

        Same tzinfo object or equal UTC offsets -> field-by-field compare;
        otherwise compare via aware subtraction.
        """
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._year, other._month, other._day,
                         other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2  # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other     # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0

    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        # Fold everything into one timedelta, then decompose back into
        # an ordinal day plus time-of-day.
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        if 0 < delta.days <= _MAXORDINAL:
            return datetime.combine(date.fromordinal(delta.days),
                                    time(hour, minute, second,
                                         delta.microseconds,
                                         tzinfo=self._tzinfo))
        raise OverflowError("result out of range")

    __radd__ = __add__

    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self + -other
            return NotImplemented

        # Naive difference first; correct by the offsets if aware.
        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            return base
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        return base + otoff - myoff

    def __hash__(self):
        # Aware datetimes hash on their UTC-equivalent duration since
        # day 0, so equal instants hash equally across timezones.
        tzoff = self.utcoffset()
        if tzoff is None:
            return hash(self._getstate()[0])
        days = _ymd2ord(self.year, self.month, self.day)
        seconds = self.hour * 3600 + self.minute * 60 + self.second
        return hash(timedelta(days, seconds, self.microsecond) - tzoff)

    # Pickle support.

    def _getstate(self):
        # 10 bytes: 16-bit year, month, day, hour, minute, second, then
        # microsecond as 3 big-endian bytes; tzinfo rides alongside.
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([yhi, ylo, self._month, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        return (self.__class__, self._getstate())
# Extreme representable datetimes and the smallest resolvable difference.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    """Return the proleptic-Gregorian ordinal of the Monday starting ISO week 1.

    ISO week 1 is the (Mon-Sun) week containing the year's first Thursday,
    so Jan 1 belongs to week 1 only when it falls on Mon..Thu.
    """
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # Monday == 0; see date.weekday()
    monday = jan1 - jan1_weekday
    # Jan 1 falling on Fri/Sat/Sun means its week belongs to the previous
    # ISO year, so week 1 starts one week later.
    if jan1_weekday > THURSDAY:
        monday += 7
    return monday
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta"
" strictly between -timedelta(hours=24) and"
" timedelta(hours=24).")
if (offset.microseconds != 0 or
offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta"
" representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s(%r)" % ('datetime.' + self.__class__.__name__,
self._offset)
return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
    """Map a UTC-valued datetime (tagged with this zone) to local time.

    With a fixed offset the conversion is a plain shift by the offset.
    *dt* must be a datetime whose tzinfo is this very object.
    """
    if not isinstance(dt, datetime):
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")
    if dt.tzinfo is not self:
        raise ValueError("fromutc: dt.tzinfo "
                         "is not self")
    return dt + self._offset
# Offsets must lie strictly between -24h and +24h; the largest
# representable whole-minute offset is therefore 23:59.
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
    """Render an offset timedelta as a 'UTC+HH:MM' / 'UTC-HH:MM' name."""
    if delta < timedelta(0):
        sign, delta = '-', -delta
    else:
        sign = '+'
    hours, remainder = divmod(delta, timedelta(hours=1))
    minutes = remainder // timedelta(minutes=1)
    return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
# Module-level singletons, built via _create() to bypass __new__'s
# argument checks (their offsets are known valid by construction).
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
# The Unix epoch as an aware datetime in UTC.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and that datetimetz+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belong to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst()). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
#brython does not have a _datetime, so lets comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
| gpl-3.0 |
alexbrasetvik/Piped | contrib/amqp/setup.py | 2 | 2444 | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import os
import sys
from setuptools import setup, find_packages
# When pip installs anything from packages, py_modules, or ext_modules that
# includes a twistd plugin (which are installed to twisted/plugins/),
# setuptools/distribute writes a Package.egg-info/top_level.txt that includes
# "twisted". If you later uninstall Package with `pip uninstall Package`,
# pip removes all of twisted/ instead of just Package's twistd plugins. See
# https://github.com/pypa/pip/issues/355
#
# To work around this problem, we monkeypatch
# setuptools.command.egg_info.write_toplevel_names to not write the line
# "twisted". This fixes the behavior of `pip uninstall Package`. Note that
# even with this workaround, `pip uninstall Package` still correctly uninstalls
# Package's twistd plugins from twisted/plugins/, since pip also uses
# Package.egg-info/installed-files.txt to determine what to uninstall,
# and the paths to the plugin files are indeed listed in installed-files.txt.
try:
    from setuptools.command import egg_info
    # Probe the attribute; old/odd setuptools raise AttributeError here.
    egg_info.write_toplevel_names
except (ImportError, AttributeError):
    pass
else:
    def _top_level_package(name):
        # 'piped.contrib.amqp' -> 'piped'
        return name.split('.', 1)[0]
    def _hacked_write_toplevel_names(cmd, basename, filename):
        # Same as the stock writer, but omit the shared 'piped' namespace
        # package so `pip uninstall` of this package cannot remove piped
        # itself (see the pip issue referenced in the comment above).
        pkgs = dict.fromkeys(
            [_top_level_package(k)
                for k in cmd.distribution.iter_distribution_names()
                if _top_level_package(k) != 'piped'
            ]
        )
        cmd.write_file("top-level names", filename, '\n'.join(pkgs) + '\n')
    egg_info.write_toplevel_names = _hacked_write_toplevel_names
here = os.path.abspath(os.path.dirname(__file__))
# add ourselves to the package path so we can get the version from the source tree
sys.path.insert(0, here)
import piped_amqp
setup(
name = 'piped.contrib.amqp',
license = 'MIT',
author = 'Piped Project Contributors',
author_email = 'piped@librelist.com',
url = 'http://piped.io',
packages = find_packages(where=here) + ['piped.plugins'],
version = str(piped_amqp.version),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Framework :: Twisted',
'Operating System :: OS Independent',
],
description = 'AMQP support for Piped.',
install_requires = ['piped', 'pika', 'setuptools']
) | mit |
SahilTikale/haas | tests/unit/model.py | 2 | 3548 | """Functional tests for model.py"""
# Some Notes:
#
# * We don't really have any agreed-upon requirements about what __repr__
# should print, but I'm fairly certain I hit an argument mistmatch at
# some point, which is definitely wrong. The test_repr methods are there just
# to make sure it isn't throwing an exception.
from hil.model import Node, Nic, Project, Headnode, Hnic, Network, \
NetworkingAction, Metadata
from hil import config
from hil.test_common import fresh_database, config_testsuite, ModelTest, \
fail_on_log_warnings
import pytest
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
@pytest.fixture
def configure():
    """Configure HIL.

    Load the test-suite configuration first, so that the extension
    loading below picks up the test settings.
    """
    config_testsuite()
    config.load_extensions()
# Wrap the shared helper as a fixture giving each test a clean database.
fresh_database = pytest.fixture(fresh_database)
# Apply both fixtures to every test in this module.
pytestmark = pytest.mark.usefixtures('configure', 'fresh_database')
class TestNic(ModelTest):
    """ModelTest for Nic objects."""

    def sample_obj(self):
        from hil.ext.obm.ipmi import Ipmi
        # Build the owning node first, then attach the NIC to it.
        obm = Ipmi(type=Ipmi.api_name,
                   host="ipmihost",
                   user="root",
                   password="tapeworm")
        node = Node(label='node-99', obm=obm)
        return Nic(node, 'ipmi', '00:11:22:33:44:55')
class TestNode(ModelTest):
    """ModelTest for Node objects."""

    def sample_obj(self):
        from hil.ext.obm.ipmi import Ipmi
        # A node needs a label and an out-of-band management controller.
        obm = Ipmi(type=Ipmi.api_name,
                   host="ipmihost",
                   user="root",
                   password="tapeworm")
        return Node(label='node-99', obm=obm)
class TestProject(ModelTest):
    """ModelTest for Project objects."""
    def sample_obj(self):
        # Projects need only a label.
        return Project('manhattan')
class TestHeadnode(ModelTest):
    """ModelTest for Headnode objects."""

    def sample_obj(self):
        owner = Project('anvil-nextgen')
        # label and base image name follow the owning project.
        return Headnode(owner, 'hn-example', 'base-headnode')
class TestHnic(ModelTest):
    """ModelTest for Hnic objects."""

    def sample_obj(self):
        headnode = Headnode(Project('anvil-nextgen'),
                            'hn-0', 'base-headnode')
        return Hnic(headnode, 'storage')
class TestNetwork(ModelTest):
    """ModelTest for Network objects."""

    def sample_obj(self):
        owner = Project('anvil-nextgen')
        # The owning project is also the only project with access.
        return Network(owner, [owner], True, '102', 'hammernet')
class TestMetadata(ModelTest):
    """ModelTest for Metadata objects."""

    def sample_obj(self):
        from hil.ext.obm.ipmi import Ipmi
        obm = Ipmi(type=Ipmi.api_name,
                   host="ipmihost",
                   user="root",
                   password="tapeworm")
        node = Node(label='node-99', obm=obm)
        # Metadata is a key/value pair attached to a node.
        return Metadata('EK', 'pk', node)
class TestNetworkingAction(ModelTest):
    """ModelTest for NetworkingAction objects."""

    def sample_obj(self):
        from hil.ext.obm.ipmi import Ipmi
        obm = Ipmi(type=Ipmi.api_name,
                   host="ipmihost",
                   user="root",
                   password="tapeworm")
        nic = Nic(Node(label='node-99', obm=obm),
                  'ipmi', '00:11:22:33:44:55')
        project = Project('anvil-nextgen')
        network = Network(project, [project], True, '102', 'hammernet')
        # The action connects the NIC to the network on the null channel.
        return NetworkingAction(nic=nic,
                                new_network=network,
                                channel='null')
| apache-2.0 |
jonnylamb/debexpo | debexpo/model/packages.py | 3 | 2375 | # -*- coding: utf-8 -*-
#
# packages.py — packages table model
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <jonny@debian.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Holds packages table model.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
import sqlalchemy as sa
from sqlalchemy import orm
from debexpo.model import meta, OrmObject
from debexpo.model.users import User
from debexpo.lib.constants import PACKAGE_NEEDS_SPONSOR_UNKNOWN
# SQLAlchemy table definition for uploaded source packages.
t_packages = sa.Table('packages', meta.metadata,
    sa.Column('id', sa.types.Integer, primary_key=True),
    # Source package name.
    sa.Column('name', sa.types.Text(), nullable=False),
    # Uploading user; nullable foreign key into the users table.
    sa.Column('user_id', sa.types.Integer, sa.ForeignKey('users.id')),
    sa.Column('description', sa.types.Text(), nullable=True),
    sa.Column('watch_counter', sa.types.Integer, default=0),
    sa.Column('download_counter', sa.types.Integer, default=0),
    # Sponsorship state; defaults to "unknown" (see lib.constants).
    sa.Column('needs_sponsor', sa.types.Integer, nullable=False, default=PACKAGE_NEEDS_SPONSOR_UNKNOWN),
    )
class Package(OrmObject):
    """ORM object for one uploaded source package."""

    # Attributes populated via ORM relations rather than plain columns.
    foreign = ['user']

    def get_description(self):
        """Return the package's description (may be None; column is nullable)."""
        return self.description
# Map Package onto t_packages; 'user' resolves the uploader and adds a
# 'packages' backref collection on User.
orm.mapper(Package, t_packages, properties={
    'user' : orm.relation(User, backref='packages')
})
| mit |
rcastellow/cryptoFX | com/pac/cryptofx/exchange/bitfinex/tests/BitfinexExchangePluginTest.py | 1 | 6432 | '''
Created on March 14, 2014
@author: RobCastellow
Copyright (c) 2014 PAC Enterprises, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import unittest
from com.pac.cryptofx.exchange.bitfinex.BitfinexExchangePlugin import BitfinexExchangePlugin
from configobj import ConfigObj
from mock import MagicMock
class Test(unittest.TestCase):
    """Integration tests for BitfinexExchangePlugin.

    Most tests talk to the live Bitfinex API; the methods asserting
    ``assertEquals(1, 1)`` are placeholders for endpoints not yet covered.
    """

    # NOTE(review): API credentials committed to source control -- rotate
    # these keys and load them from configuration/environment instead;
    # confirm they are test-only keys.
    apiKey = 's87hi6OhcYFDRnPHXzhTQE9GddWC1R1VkEG79f53qvP'
    apiSecret = '4bZefLiSh0O92QcuwZQzm46c7Dy0lycvJPFW387UbUh'
    # Plugin configuration loaded from the local test properties file.
    config = ConfigObj('cryptofxTest.properties')
    def testName(self):
        pass
    def testGetTicker(self):
        # The LTC/BTC ticker must carry every quote field.
        bfep = BitfinexExchangePlugin(self.config)
        ticker = bfep.getTicker('ltcbtc')
        self.assertNotEqual('', ticker['ask'])
        self.assertNotEqual('', ticker['timestamp'])
        self.assertNotEqual('', ticker['bid'])
        self.assertNotEqual('', ticker['last_price'])
        self.assertNotEqual('', ticker['mid'])
    def testGetToday(self):
        bfep = BitfinexExchangePlugin(self.config)
        ticker = bfep.getToday('ltcbtc')
        self.assertNotEqual('', ticker['low'])
        self.assertNotEqual('', ticker['high'])
        self.assertNotEqual('', ticker['volume'])
    def testGetStats(self):
        # Stats come back as one entry per period: 1, 7 and 30 days.
        bfep = BitfinexExchangePlugin(self.config)
        ticker = bfep.getStats('ltcbtc')
        self.assertEquals(1, ticker[0]['period'])
        self.assertNotEqual('', ticker[0]['volume'])
        self.assertEquals(7, ticker[1]['period'])
        self.assertNotEqual('', ticker[1]['volume'])
        self.assertEquals(30, ticker[2]['period'])
        self.assertNotEqual('', ticker[2]['volume'])
    def testGetLendbook(self):
        bfep = BitfinexExchangePlugin(self.config)
        params = {}
        params['limit_asks']=2
        params['limit_bids']=2
        ticker = bfep.getLendbook('ltc',params)
        # self.assertNotEqual('', ticker['bids'][0]['timestamp'])
        # self.assertNotEqual('', ticker['bids'][0]['rate'])
        # self.assertNotEqual('', ticker['bids'][0]['amount'])
        # self.assertNotEqual('', ticker['bids'][0]['period'])
        # self.assertNotEqual('', ticker['asks'][0]['timestamp'])
        # self.assertNotEqual('', ticker['asks'][0]['rate'])
        # self.assertNotEqual('', ticker['asks'][0]['amount'])
        # self.assertNotEqual('', ticker['asks'][0]['period'])
    def testGetBook(self):
        # NOTE(review): this calls getLendbook, not getBook -- looks like a
        # copy-paste from testGetLendbook; confirm the intended target.
        bfep = BitfinexExchangePlugin(self.config)
        params = {}
        params['limit_asks']=2
        params['limit_bids']=2
        ticker = bfep.getLendbook('ltc',params)
        # self.assertNotEqual('', ticker['bids'][0]['timestamp'])
        # self.assertNotEqual('', ticker['bids'][0]['rate'])
        # self.assertNotEqual('', ticker['bids'][0]['amount'])
        # self.assertNotEqual('', ticker['bids'][0]['period'])
        # self.assertNotEqual('', ticker['asks'][0]['timestamp'])
        # self.assertNotEqual('', ticker['asks'][0]['rate'])
        # self.assertNotEqual('', ticker['asks'][0]['amount'])
        # self.assertNotEqual('', ticker['asks'][0]['period'])
    # The following are placeholder tests; real assertions still to write.
    def testGetTrades(self):
        self.assertEquals(1, 1)
    def testGetLends(self):
        self.assertEquals(1, 1)
    def testGetSymbols(self):
        self.assertEquals(1, 1)
    def testNewOrder(self):
        self.assertEquals(1, 1)
    def testNewMultiOrders(self):
        self.assertEquals(1, 1)
    def testCancelOrder(self):
        self.assertEquals(1, 1)
    def testCancelMultiOrders(self):
        self.assertEquals(1, 1)
    def testCancelAllActiveOrders(self):
        self.assertEquals(1, 1)
    def testCancelReplaceOrder(self):
        self.assertEquals(1, 1)
    def testOrderStatus(self):
        self.assertEquals(1, 1)
    def testGetActiveOrders(self):
        # Authenticated call; requires the class-level API credentials.
        bfep = BitfinexExchangePlugin(self.config)
        bfep.apiKey = self.apiKey
        bfep.apiSecret = self.apiSecret
        ticker = bfep.getActiveOrders()
        print ticker
        # NOTE(review): asserting period == 1 on the first live order looks
        # environment-dependent -- confirm against a fixed test account.
        self.assertEquals(1, ticker[0]['period'])
    def testActivePositions(self):
        self.assertEquals(1, 1)
    def testClaimPosition(self):
        self.assertEquals(1, 1)
    def testPastTrades(self):
        self.assertEquals(1, 1)
    def testNewOffer(self):
        self.assertEquals(1, 1)
    def testCancelOffer(self):
        self.assertEquals(1, 1)
    def testOfferStatus(self):
        self.assertEquals(1, 1)
    def testActiveOffers(self):
        self.assertEquals(1, 1)
    def testActiveCredits(self):
        # The network call is mocked out; only the mocked return is checked.
        bfep = BitfinexExchangePlugin(self.config)
        bfep.apiKey = self.apiKey
        bfep.apiSecret = self.apiSecret
        bfep.getActiveCredits = MagicMock(return_value=[])
        credit = bfep.getActiveCredits()
        self.assertEqual([], credit)
    def testWalletBalances(self):
        bfep = BitfinexExchangePlugin(self.config)
        bfep.apiKey = self.apiKey
        bfep.apiSecret = self.apiSecret
        balances = bfep.getWalletBalances()
        print balances
        # NOTE(review): 'period' on a wallet balance looks like a copy-paste
        # from the orders test -- verify the expected response schema.
        self.assertEquals(1, balances[0]['period'])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | mit |
xxshutong/openerp-7.0 | openerp/addons/stock_invoice_directly/__init__.py | 68 | 1069 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mgaio/parameter-framework | test/functional-tests-legacy/PfwTestCase/Domains/tDomain_Configuration.py | 8 | 28195 | # -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Creation, renaming and deletion configuration testcases
List of tested functions :
--------------------------
- [listConfigurations] function
- [createConfiguration] function
- [deleteConfiguration] function
- [renameConfiguration] function
Test cases :
------------
- Testing configuration creation error
- Testing configuration renaming error
- Testing configuration deletion error
- Testing nominal case
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of Domains - Rename
class TestCases(PfwTestCase):
    def setUp(self):
        # Tuning mode must be on before domains/configurations can be edited.
        self.pfw.sendCmd("setTuningMode", "on")
        # Names and counts shared by the test methods below.
        self.domain_name = "domain_test"
        self.conf_test = "conf_white"
        self.conf_test_renamed = "conf_black"
        self.new_conf_number = 5
    def tearDown(self):
        # Leave the parameter framework back in normal (non-tuning) mode.
        self.pfw.sendCmd("setTuningMode", "off")
    def test_Conf_Creation_Error(self):
        """
        Testing configuration creation error
        ------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - Create an already existent configuration
                - Create a configuration with no name specified
                - Create a configuration on a wrong domain name
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [createConfiguration] function
                - [createDomain] function
                - [listConfigurations] function
                - [deleteConfiguration] function
                - [deleteDomain] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - no configuration created
                - existent configurations not affected by error
        """
        log.D(self.test_Conf_Creation_Error.__doc__)
        # New domain creation for testing purpose
        log.I("New domain creation for testing purpose : %s" % (self.domain_name))
        log.I("command [createDomain]")
        out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
        assert out == "Done", out
        assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
        log.I("command [createDomain] correctly executed")
        log.I("Domain %s created" % (self.domain_name))
        # New configurations creation for testing purpose
        for iteration in range (self.new_conf_number):
            new_conf_name = "".join([self.conf_test, "_", str(iteration)])
            log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
            log.I("command [createConfiguration]")
            out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
            assert out == "Done", out
            assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
            log.I("command [createConfiguration] correctly executed")
            log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
        # Domain configurations listing backup
        log.I("Configurations listing for domain %s" % (self.domain_name))
        log.I("command [listConfigurations]")
        out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
        assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
        log.I("command [listConfigurations] correctly executed")
        # Saving configurations names to a temp file for later comparison.
        f_configurations_backup = open("f_configurations_backup", "w")
        f_configurations_backup.write(out)
        f_configurations_backup.close()
        # New configurations creation error
        log.I("Creating an already existent configurations names")
        for iteration in range (self.new_conf_number):
            new_conf_name = "".join([self.conf_test, "_", str(iteration)])
            log.I("Trying to create already existent %s configuration for domain %s" % (new_conf_name,self.domain_name))
            log.I("command [createConfiguration]")
            # expectSuccess=False: the command is expected to be refused.
            out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name, expectSuccess=False)
            assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating already existent configuration %s" % (new_conf_name)
            assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
            log.I("command [createConfiguration] correctly executed")
            log.I("error correctly detected, no configuration created")
        log.I("Creating a configuration without specifying a name")
        out, err = self.pfw.sendCmd("createConfiguration",self.domain_name, expectSuccess=False)
        assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration without specifying a name"
        assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration"
        log.I("error correctly detected")
        log.I("Creating a configuration on a wrong domain name")
        new_conf_name = "new_conf"
        out, err = self.pfw.sendCmd("createConfiguration","wrong_domain_name",new_conf_name, expectSuccess=False)
        assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration on a wrong domain name"
        assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration"
        log.I("error correctly detected")
        # New domain configurations listing
        log.I("Configurations listing for domain %s" % (self.domain_name))
        log.I("command [listConfigurations]" )
        out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
        assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
        log.I("command [listConfigurations] correctly executed")
        # Saving configurations names
        f_configurations = open("f_configurations", "w")
        f_configurations.write(out)
        f_configurations.close()
        # Checking configurations names integrity: the failed creations above
        # must not have altered the configuration list.
        log.I("Configurations listing conformity check")
        f_configurations = open("f_configurations", "r")
        f_configurations_backup = open("f_configurations_backup", "r")
        listed_conf_backup = f_configurations_backup.read().splitlines()
        listed_conf = f_configurations.read().splitlines()
        assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
        log.I("No change detected, listed configurations names conform to expected values")
        # New domain deletion
        log.I("End of test, new domain deletion")
        log.I("command [deleteDomain]")
        out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
        assert out == "Done", "ERROR : %s" % (out)
        assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
        log.I("command [deleteDomain] correctly executed")
        # Closing and deleting temp files
        f_configurations_backup.close()
        os.remove("f_configurations_backup")
        f_configurations.close()
        os.remove("f_configurations")
def test_Conf_Renaming_Error(self):
    """
    Testing configuration renaming error
    ------------------------------------
    Test case description :
    ~~~~~~~~~~~~~~~~~~~~~~~
    - Rename a configuration with an already used name
    - Rename a configuration with no name specified
    - Rename a configuration on a wrong domain name
    Tested commands :
    ~~~~~~~~~~~~~~~~~
    - [renameConfiguration] function
    - [createDomain] function
    - [listConfigurations] function
    - [createConfiguration] function
    - [deleteConfiguration] function
    - [deleteDomain] function
    Expected result :
    ~~~~~~~~~~~~~~~~~
    - error detected
    - no configuration created
    - existent configurations not affected by error
    """
    log.D(self.test_Conf_Renaming_Error.__doc__)
    # New domain creation for testing purpose
    log.I("New domain creation for testing purpose : %s" % (self.domain_name))
    log.I("command [createDomain]")
    out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
    assert out == "Done", out
    assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
    log.I("command [createDomain] correctly executed")
    log.I("Domain %s created" % (self.domain_name))
    # New configurations creation for testing purpose.
    # Configurations are named <conf_test>_0 .. <conf_test>_<new_conf_number-1>.
    for iteration in range (self.new_conf_number):
        new_conf_name = "".join([self.conf_test, "_", str(iteration)])
        log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
        log.I("command [createConfiguration]")
        out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
        assert out == "Done", out
        assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
        log.I("command [createConfiguration] correctly executed")
        log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
    # Domain configurations listing backup: saved to a temp file so the list
    # can be compared again after the error scenarios below.
    log.I("Configurations listing for domain %s" % (self.domain_name))
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names
    f_configurations_backup = open("f_configurations_backup", "w")
    f_configurations_backup.write(out)
    f_configurations_backup.close()
    # New configurations renaming error: each rename below must be refused by
    # the PFW (expectSuccess=False) and leave the configuration list untouched.
    log.I("renaming a configuration with an already used name")
    for iteration in range (self.new_conf_number-1):
        conf_name = "".join([self.conf_test, "_", str(iteration)])
        new_conf_name = "".join([self.conf_test, "_", str(iteration+1)])
        log.I("Trying to rename %s on domain %s with an already used name : %s" % (conf_name,self.domain_name,new_conf_name))
        log.I("command [renameConfiguration]" )
        out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name, expectSuccess=False)
        assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming configuration %s with an already used name" % (new_conf_name)
        assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s" % (new_conf_name)
        log.I("command [renameConfiguration] correctly executed")
        log.I("error correctly detected, no configuration renamed")
    # Missing new-name argument: the target name is deliberately omitted.
    log.I("renaming a configuration without specifying a new name")
    out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,new_conf_name, expectSuccess=False)
    assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration without specifying a new name"
    assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration"
    log.I("error correctly detected, no configuration renamed")
    log.I("renaming a configuration on a wrong domain name")
    new_conf_name = "new_conf"
    out, err = self.pfw.sendCmd("renameConfiguration","wrong_domain_name",new_conf_name,"Configuration", expectSuccess=False)
    assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration on a wrong domain name"
    assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration"
    log.I("error correctly detected, no configuration renamed")
    # New domain configurations listing
    log.I("Configurations listing for domain %s" % (self.domain_name))
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names
    f_configurations = open("f_configurations", "w")
    f_configurations.write(out)
    f_configurations.close()
    # Checking configurations names integrity: the post-error listing must be
    # byte-identical to the backup taken before the rename attempts.
    log.I("Configurations listing conformity check")
    f_configurations = open("f_configurations", "r")
    f_configurations_backup = open("f_configurations_backup", "r")
    listed_conf_backup = f_configurations_backup.read().splitlines()
    listed_conf = f_configurations.read().splitlines()
    assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
    log.I("No change detected, listed configurations names conform to expected values")
    # Testing domain deletion
    log.I("End of test, new domain deletion")
    log.I("command [deleteDomain]")
    out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
    assert out == "Done", "ERROR : %s" % (out)
    assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
    log.I("command [deleteDomain] correctly executed")
    # Closing and deleting temp files
    f_configurations_backup.close()
    os.remove("f_configurations_backup")
    f_configurations.close()
    os.remove("f_configurations")
def test_Conf_Deletion_Error(self):
    """
    Testing configuration deletion error
    ------------------------------------
    Test case description :
    ~~~~~~~~~~~~~~~~~~~~~~~
    - Delete a configuration with a non existent name
    - Delete a configuration with no name specified
    - Delete a configuration on a wrong domain name
    Tested commands :
    ~~~~~~~~~~~~~~~~~
    - [deleteConfiguration] function
    - [createDomain] function
    - [listConfigurations] function
    - [createConfiguration] function
    - [deleteDomain] function
    Expected result :
    ~~~~~~~~~~~~~~~~~
    - error detected
    - no configuration created
    - existent configurations not affected by error
    """
    # FIX: was `print(self.test_Conf_Renaming_Error.__doc__)` — it logged the
    # docstring of the *renaming* test and bypassed the log.D convention used
    # by every other test in this module. Log this test's own docstring.
    log.D(self.test_Conf_Deletion_Error.__doc__)
    # New domain creation for testing purpose
    log.I("New domain creation for testing purpose : %s" % (self.domain_name))
    log.I("command [createDomain]")
    out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
    assert out == "Done", out
    assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
    log.I("command [createDomain] correctly executed")
    log.I("Domain %s created" % (self.domain_name))
    # New configurations creation for testing purpose.
    # Configurations are named <conf_test>_0 .. <conf_test>_<new_conf_number-1>.
    for iteration in range (self.new_conf_number):
        new_conf_name = "".join([self.conf_test, "_", str(iteration)])
        log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
        log.I("command [createConfiguration]")
        out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
        assert out == "Done", out
        assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
        log.I("command [createConfiguration] correctly executed")
        log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
    # Domain configurations listing backup: saved so the list can be compared
    # again after the deletion error scenarios below.
    log.I("Configurations listing for domain %s" % (self.domain_name))
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names
    f_configurations_backup = open("f_configurations_backup", "w")
    f_configurations_backup.write(out)
    f_configurations_backup.close()
    # Configurations deletion errors: every deletion below must be refused by
    # the PFW (expectSuccess=False) and leave existing configurations intact.
    log.I("Trying various deletions error test cases")
    log.I("Trying to delete a wrong configuration name on domain %s" % (self.domain_name))
    log.I("command [deleteConfiguration]")
    out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name,"wrong_configuration_name", expectSuccess=False)
    assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting non existent configuration name"
    assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
    log.I("command [deleteConfiguration] correctly executed")
    log.I("error correctly detected, no configuration deleted")
    # Missing name argument: the configuration name is deliberately omitted.
    log.I("deleting a configuration with no name specified")
    out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name, expectSuccess=False)
    assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration without specifying a name"
    assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
    log.I("error correctly detected, no configuration deleted")
    # new_conf_name still holds the last configuration created above.
    log.I("deleting a configuration on a wrong domain name")
    out, err = self.pfw.sendCmd("deleteConfiguration","wrong_domain_name",new_conf_name, expectSuccess=False)
    assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration on a wrong domain name"
    assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration"
    log.I("error correctly detected, no configuration deleted")
    # New domain configurations listing
    log.I("Configurations listing for domain %s" % (self.domain_name))
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names
    f_configurations = open("f_configurations", "w")
    f_configurations.write(out)
    f_configurations.close()
    # Checking configurations names integrity: the post-error listing must be
    # identical to the backup taken before the deletion attempts.
    log.I("Configurations listing conformity check")
    f_configurations = open("f_configurations", "r")
    f_configurations_backup = open("f_configurations_backup", "r")
    listed_conf_backup = f_configurations_backup.read().splitlines()
    listed_conf = f_configurations.read().splitlines()
    assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf)
    log.I("No change detected, listed configurations names conform to expected values")
    # Testing domain deletion
    log.I("End of test, new domain deletion")
    log.I("command [deleteDomain]")
    out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
    assert out == "Done", "ERROR : %s" % (out)
    assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
    log.I("command [deleteDomain] correctly executed")
    # Closing and deleting temp files
    f_configurations_backup.close()
    os.remove("f_configurations_backup")
    f_configurations.close()
    os.remove("f_configurations")
def test_Nominal_Case(self):
    """
    Testing nominal cases
    ---------------------
    Test case description :
    ~~~~~~~~~~~~~~~~~~~~~~~
    - Create new configurations
    - List domain configurations
    - Rename configurations
    - Delete configurations
    Tested commands :
    ~~~~~~~~~~~~~~~~~
    - [listConfigurations] function
    - [createConfiguration] function
    - [renameConfiguration] function
    - [deleteConfiguration] function
    - [createDomain] function
    - [deleteDomain] function
    Expected result :
    ~~~~~~~~~~~~~~~~~
    - all operations succeed
    """
    log.D(self.test_Nominal_Case.__doc__)
    # New domain creation
    log.I("New domain creation for testing purpose : %s" % (self.domain_name))
    log.I("command [createDomain]")
    out, err = self.pfw.sendCmd("createDomain",self.domain_name, "")
    assert out == "Done", out
    assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name)
    log.I("command [createDomain] correctly executed")
    log.I("Domain %s created" % (self.domain_name))
    # New configurations creation: <conf_test>_0 .. <conf_test>_<new_conf_number-1>
    for iteration in range (self.new_conf_number):
        new_conf_name = "".join([self.conf_test, "_", str(iteration)])
        log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name))
        log.I("command [createConfiguration]" )
        out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name)
        assert out == "Done", out
        assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name)
        log.I("command [createConfiguration] correctly executed")
        log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name))
    # Listing domain configurations
    log.I("Configurations listing for domain %s" % (self.domain_name))
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names to a temp file
    f_configurations = open("f_configurations", "w")
    f_configurations.write(out)
    f_configurations.close()
    # Checking configurations names integrity: the listing must match the
    # exact set of names created above, in creation order.
    log.I("Configurations listing conformity check")
    f_configurations = open("f_configurations", "r")
    new_conf_names = [self.conf_test + "_" + str(iteration) for iteration in range(self.new_conf_number)]
    listed_conf = f_configurations.read().strip('\r\n').splitlines()
    assert listed_conf == new_conf_names, "ERROR : Error while listing configuration, expected '%s', found '%s'" % (new_conf_names, listed_conf)
    log.I("Listed configurations names conform to expected values")
    # Configuration renaming: <conf_test>_N -> <conf_test_renamed>_N
    log.I("Configurations renaming")
    for iteration in range (self.new_conf_number):
        conf_name = "".join([self.conf_test, "_", str(iteration)])
        new_conf_name = "".join([self.conf_test_renamed, "_", str(iteration)])
        log.I("Configuration %s renamed to %s in domain %s" % (conf_name,new_conf_name,self.domain_name))
        log.I("command [renameConfiguration]")
        out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name)
        assert out == "Done", out
        assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s to %s" % (conf_name,new_conf_name)
        log.I("command [renameConfiguration] correctly executed")
        log.I("Configuration %s renamed to %s for domain %s" % (conf_name,new_conf_name,self.domain_name))
    # Listing domain configurations again to verify the renaming took effect
    log.I("Configurations listing to check configurations renaming")
    log.I("command [listConfigurations]")
    out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "")
    assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name)
    log.I("command [listConfigurations] correctly executed")
    # Saving configurations names (kept in memory this time, no temp file)
    configurations_renamed = out
    # Checking configurations names integrity against the renamed name set
    log.I("Configurations listing conformity check")
    new_conf_names = [self.conf_test_renamed + "_" + str(iteration) for iteration in range(self.new_conf_number)]
    listed_conf = configurations_renamed.strip('\r\n').splitlines()
    assert listed_conf == new_conf_names, "ERROR : Error while renaming configuration, expected '%s', found '%s'" % (new_conf_names, listed_conf)
    log.I("Listed configurations names conform to expected values, renaming successfull")
    # New domain deletion
    log.I("End of test, new domain deletion")
    log.I("command [deleteDomain]")
    out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "")
    assert out == "Done", "ERROR : %s" % (out)
    assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name)
    log.I("command [deleteDomain] correctly executed")
    # Closing and deleting temp file
    f_configurations.close()
    os.remove("f_configurations")
| bsd-3-clause |
Beauhurst/django | tests/swappable_models/tests.py | 106 | 1963 | from io import StringIO
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from .models import Article
class SwappableModelTests(TestCase):
    """Behavior of models declared swappable via TEST_ARTICLE_MODEL."""

    # Limit memory usage when calling 'migrate'.
    available_apps = [
        'swappable_models',
        'django.contrib.auth',
        'django.contrib.contenttypes',
    ]

    @override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
    def test_generated_data(self):
        "Permissions and content types are not created for a swapped model"
        # Wipe the auto-generated permissions and content types for this app.
        Permission.objects.filter(content_type__app_label='swappable_models').delete()
        ContentType.objects.filter(app_label='swappable_models').delete()

        # Re-run migrate, which rebuilds permissions and content types.
        output = StringIO()
        management.call_command('migrate', interactive=False, stdout=output)

        # Only the swapped-in model should have regenerated rows.
        perm_pairs = [
            (perm.content_type.app_label, perm.content_type.model)
            for perm in Permission.objects.all()
        ]
        self.assertIn(('swappable_models', 'alternatearticle'), perm_pairs)
        self.assertNotIn(('swappable_models', 'article'), perm_pairs)

        ct_pairs = [(ct.app_label, ct.model) for ct in ContentType.objects.all()]
        self.assertIn(('swappable_models', 'alternatearticle'), ct_pairs)
        self.assertNotIn(('swappable_models', 'article'), ct_pairs)

    @override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
    def test_case_insensitive(self):
        "Model names are case insensitive. Model swapping honors this."
        # A lowercase swap target must still be recognized as Article itself.
        Article.objects.all()
        self.assertIsNone(Article._meta.swapped)
| bsd-3-clause |
eduNEXT/edunext-platform | cms/djangoapps/contentstore/management/commands/tests/test_force_publish.py | 4 | 5002 | """
Tests for the force_publish management command
"""
import mock
import six
from django.core.management import CommandError, call_command
from contentstore.management.commands.force_publish import Command
from contentstore.management.commands.utils import get_course_versions
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class TestForcePublish(SharedModuleStoreTestCase):
    """
    Tests for the force_publish management command
    """
    @classmethod
    def setUpClass(cls):
        super(TestForcePublish, cls).setUpClass()
        cls.course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
        cls.test_user_id = ModuleStoreEnum.UserID.test
        cls.command = Command()

    def test_no_args(self):
        """
        Test 'force_publish' command with no arguments
        """
        # argparse's missing-argument wording differs between Python 2 and 3.
        if six.PY2:
            expected_error = "Error: too few arguments"
        else:
            expected_error = "Error: the following arguments are required: course_key"
        with self.assertRaisesRegex(CommandError, expected_error):
            call_command('force_publish')

    def test_invalid_course_key(self):
        """
        Test 'force_publish' command with invalid course key
        """
        expected_error = "Invalid course key."
        with self.assertRaisesRegex(CommandError, expected_error):
            call_command('force_publish', 'TestX/TS01')

    def test_too_many_arguments(self):
        """
        Test 'force_publish' command with more than 2 arguments
        """
        expected_error = "Error: unrecognized arguments: invalid-arg"
        with self.assertRaisesRegex(CommandError, expected_error):
            call_command('force_publish', six.text_type(self.course.id), '--commit', 'invalid-arg')

    def test_course_key_not_found(self):
        """
        Test 'force_publish' command with non-existing course key
        """
        expected_error = "Course not found."
        with self.assertRaisesRegex(CommandError, expected_error):
            call_command('force_publish', six.text_type('course-v1:org+course+run'))

    def test_force_publish_non_split(self):
        """
        Test 'force_publish' command doesn't work on non split courses
        """
        # Old-mongo courses are unsupported; the command must refuse them.
        mongo_course = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
        expected_error = 'The owning modulestore does not support this command.'
        with self.assertRaisesRegex(CommandError, expected_error):
            call_command('force_publish', six.text_type(mongo_course.id))
class TestForcePublishModifications(ModuleStoreTestCase):
    """
    Tests for the force_publish management command that modify the courseware
    during the test.
    """
    def setUp(self):
        super(TestForcePublishModifications, self).setUp()
        # Fresh split-mongo course per test, since each test mutates it.
        self.course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
        self.test_user_id = ModuleStoreEnum.UserID.test
        self.command = Command()

    def test_force_publish(self):
        """
        Test 'force_publish' command
        """
        # Add some changes to course so the draft branch diverges from published.
        chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
        self.store.create_child(
            self.test_user_id,
            chapter.location,
            'html',
            block_id='html_component'
        )
        # verify that course has changes.
        self.assertTrue(self.store.has_changes(self.store.get_item(self.course.location)))
        # get draft and publish branch versions
        versions = get_course_versions(six.text_type(self.course.id))
        draft_version = versions['draft-branch']
        published_version = versions['published-branch']
        # verify that draft and publish point to different versions
        self.assertNotEqual(draft_version, published_version)
        # Auto-confirm the command's interactive yes/no prompt.
        with mock.patch('contentstore.management.commands.force_publish.query_yes_no') as patched_yes_no:
            patched_yes_no.return_value = True
            # force publish course
            call_command('force_publish', six.text_type(self.course.id), '--commit')
            # verify that course has no changes
            self.assertFalse(self.store.has_changes(self.store.get_item(self.course.location)))
            # get new draft and publish branch versions
            versions = get_course_versions(six.text_type(self.course.id))
            new_draft_version = versions['draft-branch']
            new_published_version = versions['published-branch']
            # verify that the draft branch didn't change while the published branch did
            self.assertEqual(draft_version, new_draft_version)
            self.assertNotEqual(published_version, new_published_version)
            # verify that draft and publish point to same versions now
            self.assertEqual(new_draft_version, new_published_version)
| agpl-3.0 |
kingvuplus/PKT-gui2 | lib/python/Screens/PluginBrowser.py | 2 | 20413 | from boxbranding import getImageVersion
from urllib import urlopen
import socket
import os
from Screens.ParentalControlSetup import ProtectedScreen
from enigma import eConsoleAppContainer, eDVBDB
from Screens.Screen import Screen
from Components.OnlineUpdateCheck import feedsstatuscheck
from Components.ActionMap import ActionMap, NumberActionMap
from Components.config import config, ConfigSubsection, ConfigText
from Components.PluginComponent import plugins
from Components.PluginList import PluginList, PluginEntryComponent, PluginCategoryComponent, PluginDownloadComponent
from Components.Label import Label
from Components.Language import language
from Components.Button import Button
from Components.Harddisk import harddiskmanager
from Components.Sources.StaticText import StaticText
from Components import Ipkg
from Components.config import config
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.Console import Console
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
# Reload the plugin list whenever the UI language changes, so that
# translated plugin names/descriptions are picked up.
language.addCallback(plugins.reloadPlugins)

# Persistent plugin-browser settings; plugin_order stores the user's custom
# ordering as a comma-separated list of plugin path suffixes.
config.misc.pluginbrowser = ConfigSubsection()
config.misc.pluginbrowser.plugin_order = ConfigText(default="")
class PluginBrowserSummary(Screen):
    """Summary (front display) screen that mirrors the PluginBrowser selection."""

    def __init__(self, session, parent):
        Screen.__init__(self, session, parent = parent)
        self["entry"] = StaticText("")
        self["desc"] = StaticText("")
        # Only track the parent's selection while this summary is visible.
        self.onShow.append(self.addWatcher)
        self.onHide.append(self.removeWatcher)

    def addWatcher(self):
        # Register for selection updates and sync immediately with the
        # parent's current selection.
        self.parent.onChangedEntry.append(self.selectionChanged)
        self.parent.selectionChanged()

    def removeWatcher(self):
        self.parent.onChangedEntry.remove(self.selectionChanged)

    def selectionChanged(self, name, desc):
        # Called by the parent with the selected plugin's name/description.
        self["entry"].text = name
        self["desc"].text = desc
class PluginBrowser(Screen, ProtectedScreen):
    """Main plugin browser: lists installed plugins and offers download/remove.

    The list order can be customised with shift-up/down and is persisted in
    config.misc.pluginbrowser.plugin_order. Digits 1-9 jump to and run the
    n-th plugin; 0 offers to reset a customised order.
    """

    def __init__(self, session):
        Screen.__init__(self, session)
        Screen.setTitle(self, _("Plugin Browser"))
        ProtectedScreen.__init__(self)
        # True until the download browser has been opened once this session.
        self.firsttime = True
        self["key_red"] = Button(_("Remove plugins"))
        self["key_green"] = Button(_("Download plugins"))
        self.list = []
        self["list"] = PluginList(self.list)
        if config.usage.sort_pluginlist.value:
            self["list"].list.sort()
        self["actions"] = ActionMap(["WizardActions", "MenuActions"],
        {
            "ok": self.save,
            "back": self.close,
            "menu": self.openSetup,
        })
        self["PluginDownloadActions"] = ActionMap(["ColorActions"],
        {
            "red": self.delete,
            "green": self.download
        })
        self["DirectionActions"] = ActionMap(["DirectionActions"],
        {
            "shiftUp": self.moveUp,
            "shiftDown": self.moveDown
        })
        self["NumberActions"] = NumberActionMap(["NumberActions"],
        {
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        })
        self.onFirstExecBegin.append(self.checkWarnings)
        self.onShown.append(self.updateList)
        self.onChangedEntry = []
        self["list"].onSelectionChanged.append(self.selectionChanged)
        self.onLayoutFinish.append(self.saveListsize)

    def openSetup(self):
        """Open the plugin browser setup screen."""
        from Screens.Setup import Setup
        self.session.open(Setup, "pluginbrowsersetup")

    def isProtected(self):
        """Return True when parental control requires a PIN for this screen."""
        return config.ParentalControl.setuppinactive.value and (not config.ParentalControl.config_sections.main_menu.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and config.ParentalControl.config_sections.plugin_browser.value

    def saveListsize(self):
        # Cache the list widget dimensions; entry renderers need the width.
        listsize = self["list"].instance.size()
        self.listWidth = listsize.width()
        self.listHeight = listsize.height()

    def createSummary(self):
        return PluginBrowserSummary

    def selectionChanged(self):
        """Push the selected plugin's name/description to summary listeners."""
        item = self["list"].getCurrent()
        if item:
            p = item[0]
            name = p.name
            desc = p.description
        else:
            name = "-"
            desc = ""
        for cb in self.onChangedEntry:
            cb(name, desc)

    def checkWarnings(self):
        """Show a message box listing plugins that failed to load, if any."""
        if len(plugins.warnings):
            text = _("Some plugins are not available:\n")
            for (pluginname, error) in plugins.warnings:
                text += _("%s (%s)\n") % (pluginname, error)
            plugins.resetWarnings()
            self.session.open(MessageBox, text = text, type = MessageBox.TYPE_WARNING)

    def save(self):
        self.run()

    def run(self):
        """Start the currently selected plugin."""
        plugin = self["list"].l.getCurrentSelection()[0]
        plugin(session=self.session)

    def setDefaultList(self, answer):
        # Confirmation callback: clear the persisted custom ordering.
        if answer:
            config.misc.pluginbrowser.plugin_order.value = ""
            config.misc.pluginbrowser.plugin_order.save()
            self.updateList()

    def keyNumberGlobal(self, number):
        """Digit shortcut: 1-9 runs the n-th plugin, 0 resets custom order."""
        if number == 0:
            if len(self.list) > 0 and config.misc.pluginbrowser.plugin_order.value != "":
                self.session.openWithCallback(self.setDefaultList, MessageBox, _("Sort plugins list to default?"), MessageBox.TYPE_YESNO)
        else:
            real_number = number - 1
            if real_number < len(self.list):
                self["list"].moveToIndex(real_number)
                self.run()

    def moveUp(self):
        self.move(-1)

    def moveDown(self):
        self.move(1)

    def move(self, direction):
        """Swap the selected entry with its neighbour and persist the order.

        Moving past either end wraps around (rotates the whole list).
        """
        if len(self.list) > 1:
            currentIndex = self["list"].getSelectionIndex()
            swapIndex = (currentIndex + direction) % len(self.list)
            if currentIndex == 0 and swapIndex != 1:
                # Moving the first entry up: rotate it to the end.
                self.list = self.list[1:] + [self.list[0]]
            elif swapIndex == 0 and currentIndex != 1:
                # Moving the last entry down: rotate it to the front.
                self.list = [self.list[-1]] + self.list[:-1]
            else:
                self.list[currentIndex], self.list[swapIndex] = self.list[swapIndex], self.list[currentIndex]
            self["list"].l.setList(self.list)
            if direction == 1:
                self["list"].down()
            else:
                self["list"].up()
            # Persist the new order as path suffixes (path[24:] strips the
            # common plugin directory prefix).
            plugin_order = []
            for x in self.list:
                plugin_order.append(x[0].path[24:])
            config.misc.pluginbrowser.plugin_order.value = ",".join(plugin_order)
            config.misc.pluginbrowser.plugin_order.save()

    def updateList(self):
        """Rebuild the list: user-ordered plugins first, then the remainder."""
        self.list = []
        pluginlist = plugins.getPlugins(PluginDescriptor.WHERE_PLUGINMENU)[:]
        for x in config.misc.pluginbrowser.plugin_order.value.split(","):
            plugin = list(plugin for plugin in pluginlist if plugin.path[24:] == x)
            if plugin:
                self.list.append(PluginEntryComponent(plugin[0], self.listWidth))
                pluginlist.remove(plugin[0])
        self.list = self.list + [PluginEntryComponent(plugin, self.listWidth) for plugin in pluginlist]
        self["list"].l.setList(self.list)

    def delete(self):
        self.session.openWithCallback(self.PluginDownloadBrowserClosed, PluginDownloadBrowser, PluginDownloadBrowser.REMOVE)

    def download(self):
        self.session.openWithCallback(self.PluginDownloadBrowserClosed, PluginDownloadBrowser, PluginDownloadBrowser.DOWNLOAD, self.firsttime)
        self.firsttime = False

    def PluginDownloadBrowserClosed(self):
        # Refresh after install/remove and re-check for load warnings.
        self.updateList()
        self.checkWarnings()

    def openExtensionmanager(self):
        """Open the SoftwareManager's PluginManager if it is installed."""
        # FIX: fileExists is not imported at module level in this file, so
        # calling this method raised NameError. Import it locally from the
        # Tools.Directories module the file already uses.
        from Tools.Directories import fileExists
        if fileExists(resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SoftwareManager/plugin.py")):
            try:
                from Plugins.SystemPlugins.SoftwareManager.plugin import PluginManager
            except ImportError:
                self.session.open(MessageBox, _("The software management extension is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
            else:
                self.session.openWithCallback(self.PluginDownloadBrowserClosed, PluginManager)
class PluginDownloadBrowser(Screen):
    """Screen that downloads or removes plugin packages via opkg/ipkg."""
    # Operating modes, passed as `type` to __init__.
    DOWNLOAD = 0
    REMOVE = 1
    # Package-name prefix shared by all enigma2 plugin packages.
    PLUGIN_PREFIX = 'enigma2-plugin-'
    # Class-level (shared across instances) timestamp of the last feed fetch.
    lastDownloadDate = None
def __init__(self, session, type = 0, needupdate = True):
    """Set up the browser.

    type: DOWNLOAD or REMOVE, selects the mode of operation.
    needupdate: whether the package feed should be refreshed first.
    """
    Screen.__init__(self, session)
    self.type = type
    self.needupdate = needupdate
    # Console container used to run the package-manager commands
    # asynchronously; output arrives via dataAvail, completion via appClosed.
    self.container = eConsoleAppContainer()
    self.container.appClosed.append(self.runFinished)
    self.container.dataAvail.append(self.dataAvail)
    self.onLayoutFinish.append(self.startRun)
    self.onShown.append(self.setWindowTitle)
    self.list = []
    self["list"] = PluginList(self.list)
    self.pluginlist = []
    # Category names the user has expanded in the tree view.
    self.expanded = []
    self.installedplugins = []
    # Flags used by requestClose to decide what needs reloading on exit.
    self.plugins_changed = False
    self.reload_settings = False
    self.check_settings = False
    self.check_bootlogo = False
    self.install_settings_name = ''
    self.remove_settings_name = ''
    if self.type == self.DOWNLOAD:
        self["text"] = Label(_("Downloading plugin information. Please wait..."))
    elif self.type == self.REMOVE:
        self["text"] = Label(_("Getting plugin information. Please wait..."))
    # State-machine step counter for the multi-stage command run.
    self.run = 0
    # Buffer for a partial line left over between dataAvail callbacks.
    self.remainingdata = ""
    self["actions"] = ActionMap(["WizardActions"],
    {
        "ok": self.go,
        "back": self.requestClose,
    })
    # Prefer opkg when available; fall back to the legacy ipkg tool with
    # its slightly different install/remove options.
    if os.path.isfile('/usr/bin/opkg'):
        self.ipkg = '/usr/bin/opkg'
        self.ipkg_install = self.ipkg + ' install'
        self.ipkg_remove = self.ipkg + ' remove --autoremove'
    else:
        self.ipkg = 'ipkg'
        self.ipkg_install = 'ipkg install -force-defaults'
        self.ipkg_remove = self.ipkg + ' remove'
def go(self):
    """OK handler: toggle a category, or confirm install/remove of a plugin."""
    sel = self["list"].l.getCurrentSelection()
    if sel is None:
        return
    sel = sel[0]
    if isinstance(sel, str): # category
        # Toggle the expanded/collapsed state of the category row.
        if sel in self.expanded:
            self.expanded.remove(sel)
        else:
            self.expanded.append(sel)
        self.updateList()
    else:
        # Plugin row: ask for confirmation, then runInstall does the work.
        if self.type == self.DOWNLOAD:
            mbox=self.session.openWithCallback(self.runInstall, MessageBox, _("Do you really want to download the plugin \"%s\"?") % sel.name)
            mbox.setTitle(_("Download plugins"))
        elif self.type == self.REMOVE:
            mbox=self.session.openWithCallback(self.runInstall, MessageBox, _("Do you really want to remove the plugin \"%s\"?") % sel.name, default = False)
            mbox.setTitle(_("Remove plugins"))
def requestClose(self):
    """Close the screen, reloading plugins/bouquets if anything changed."""
    if self.plugins_changed:
        plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
    if self.reload_settings:
        # A settings package was (un)installed: reload service data too.
        self["text"].setText(_("Reloading bouquets and services..."))
        eDVBDB.getInstance().reloadBouquets()
        eDVBDB.getInstance().reloadServicelist()
        plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
    # Detach console callbacks so the container cannot call back into a
    # closed screen.
    self.container.appClosed.remove(self.runFinished)
    self.container.dataAvail.remove(self.dataAvail)
    self.close()
def resetPostInstall(self):
    """Discard any pending post-install callback.

    Safe to call whether or not a callback was registered.
    """
    try:
        del self.postInstallCall
    # FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Only the missing attribute is expected here.
    except AttributeError:
        pass
def installDestinationCallback(self, result):
    """ChoiceBox callback: install to the chosen (description, mountpoint)."""
    if result is not None:
        dest = result[1]
        if dest.startswith('/'):
            # Custom install path, add it to the list too
            dest = os.path.normpath(dest)
            # Register the path as an additional opkg destination and
            # direct this installation to it.
            extra = '--add-dest %s:%s -d %s' % (dest,dest,dest)
            Ipkg.opkgAddDestination(dest)
        else:
            # A named destination that opkg already knows about.
            extra = '-d ' + dest
        self.doInstall(self.installFinished, self["list"].l.getCurrentSelection()[0].name + ' ' + extra)
    else:
        # Selection aborted: forget the pending post-install hook.
        self.resetPostInstall()
def runInstall(self, val):
    """Confirmation callback: start installing/removing the selected plugin.

    val -- True if the user confirmed the MessageBox, otherwise False.
    """
    if not val:
        return
    if self.type == self.DOWNLOAD:
        sel_name = self["list"].l.getCurrentSelection()[0].name
        # Picon packages may be installed on an alternative writable device;
        # offer a destination selection first (both branches shared the same
        # partition-scan code, now deduplicated in _selectPiconDestination).
        if sel_name.startswith("picons-"):
            if self._selectPiconDestination("picon"):
                return
        elif sel_name.startswith("display-picon"):
            if self._selectPiconDestination("lcdpicon"):
                return
        self.install_settings_name = sel_name
        self.install_bootlogo_name = sel_name
        if sel_name.startswith('settings-'):
            # Only one channel list may be installed at a time: first query
            # what is already there (handled in dataAvail/runFinished).
            self.check_settings = True
            self.startIpkgListInstalled(self.PLUGIN_PREFIX + 'settings-*')
        elif sel_name.startswith('bootlogos-'):
            self.check_bootlogo = True
            self.startIpkgListInstalled(self.PLUGIN_PREFIX + 'bootlogos-*')
        else:
            self.runSettingsInstall()
    elif self.type == self.REMOVE:
        self.doRemove(self.installFinished, self["list"].l.getCurrentSelection()[0].name + " --force-remove --force-depends")

def _selectPiconDestination(self, flavour):
    """Offer a destination ChoiceBox for a picon package.

    flavour -- "picon" for regular picons, "lcdpicon" for display picons.
    Returns True when a ChoiceBox was opened (installation continues in
    installDestinationCallback), False when no suitable partition exists
    and the normal installation path should be taken instead.
    """
    supported_filesystems = frozenset(('ext4', 'ext3', 'ext2', 'reiser', 'reiser4', 'jffs2', 'ubifs', 'rootfs'))
    candidates = []
    import Components.Harddisk
    mounts = Components.Harddisk.getProcMounts()
    for partition in harddiskmanager.getMountedPartitions(False, mounts):
        if partition.filesystem(mounts) in supported_filesystems:
            candidates.append((partition.description, partition.mountpoint))
    if not candidates:
        return False
    if flavour == "picon":
        from Components.Renderer import Picon
        self.postInstallCall = Picon.initPiconPaths
        title = _("Install picons on")
    else:
        from Components.Renderer import LcdPicon
        self.postInstallCall = LcdPicon.initLcdPiconPaths
        title = _("Install lcd picons on")
    self.session.openWithCallback(self.installDestinationCallback, ChoiceBox, title=title, list=candidates)
    return True
def doRemove(self, callback, pkgname):
    """Run "opkg remove" for pkgname in a Console screen; extra destinations
    are included so packages installed on other devices are removed too."""
    self.session.openWithCallback(callback, Console, cmdlist = [self.ipkg_remove + Ipkg.opkgExtraDestinations() + " " + self.PLUGIN_PREFIX + pkgname, "sync"], closeOnSuccess = True)
def doInstall(self, callback, pkgname):
    """Run "opkg install" for pkgname in a Console screen, then sync disks."""
    self.session.openWithCallback(callback, Console, cmdlist = [self.ipkg_install + " " + self.PLUGIN_PREFIX + pkgname, "sync"], closeOnSuccess = True)
def runSettingsRemove(self, val):
    """MessageBox callback: remove the old channel list, then install the new one."""
    if val:
        self.doRemove(self.runSettingsInstall, self.remove_settings_name)
def runBootlogoRemove(self, val):
    """MessageBox callback: remove the old bootlogo, then install the new one."""
    if val:
        self.doRemove(self.runSettingsInstall, self.remove_bootlogo_name + " --force-remove --force-depends")
def runSettingsInstall(self):
    """Install the package remembered in install_settings_name."""
    self.doInstall(self.installFinished, self.install_settings_name)
def setWindowTitle(self):
    """Set the screen caption according to the browser mode."""
    mode_titles = {
        self.DOWNLOAD: _("Install plugins"),
        self.REMOVE: _("Remove plugins"),
    }
    title = mode_titles.get(self.type)
    if title is not None:
        self.setTitle(title)
def startIpkgListInstalled(self, pkgname = PLUGIN_PREFIX + '*'):
    """Run "opkg list_installed" for pkgname; output arrives via dataAvail.

    NOTE: the default argument is evaluated once, at class-creation time,
    from the class attribute PLUGIN_PREFIX.
    """
    self.container.execute(self.ipkg + Ipkg.opkgExtraDestinations() + " list_installed '%s'" % pkgname)
def startIpkgListAvailable(self):
    """Run "opkg list" for all plugin packages; output arrives via dataAvail."""
    self.container.execute(self.ipkg + Ipkg.opkgExtraDestinations() + " list '" + self.PLUGIN_PREFIX + "*'")
def startRun(self):
    """Kick off the opkg pipeline once the screen is laid out."""
    # Remember the list geometry, then hide the list while data is fetched.
    listsize = self["list"].instance.size()
    self["list"].instance.hide()
    self.listWidth = listsize.width()
    self.listHeight = listsize.height()
    if self.type == self.DOWNLOAD:
        # Refuse to download when the feeds are not usable at all.
        if feedsstatuscheck.getFeedsBool() not in ('stable', 'unstable'):
            self["text"].setText(feedsstatuscheck.getFeedsErrorMessage())
        elif config.softwareupdate.updateisunstable.value == '1' and config.softwareupdate.updatebeta.value:
            # Unstable feeds, but the user opted in to experimental updates:
            # warn and continue.
            self["text"].setText(_("WARNING: feeds may be unstable.") + '\n' + _("Downloading plugin information. Please wait..."))
            self.container.execute(self.ipkg + " update")
        elif config.softwareupdate.updateisunstable.value == '1' and not config.softwareupdate.updatebeta.value:
            # Unstable feeds and no opt-in: explain how to enable them.
            self["text"].setText(_("Sorry feeds seem be in an unstable state, if you wish to use them please enable 'Allow unstable (experimental) updates' in \"Software update settings\"."))
        else:
            self.container.execute(self.ipkg + " update")
    elif self.type == self.REMOVE:
        # For removal we can query the installed packages right away.
        self.run = 1
        self.startIpkgListInstalled()
def installFinished(self):
    """Console callback after an install/remove finished successfully."""
    # Run the optional post-install hook (e.g. re-initialize picon paths).
    if hasattr(self, 'postInstallCall'):
        try:
            self.postInstallCall()
        except Exception, ex:
            print "[PluginBrowser] postInstallCall failed:", ex
        self.resetPostInstall()
    # Best effort: remove the temporary opkg configuration, if present.
    try:
        os.unlink('/tmp/opkg.conf')
    except:
        pass
    # Drop the processed plugin from the list of offered plugins.
    for plugin in self.pluginlist:
        if plugin[3] == self["list"].l.getCurrentSelection()[0].name:
            self.pluginlist.remove(plugin)
            break
    self.plugins_changed = True
    # A new channel list requires bouquets to be reloaded on close.
    if self["list"].l.getCurrentSelection()[0].name.startswith("settings-"):
        self.reload_settings = True
    self.expanded = []
    self.updateList()
    self["list"].moveToIndex(0)
def runFinished(self, retval):
    """Container callback: one opkg invocation finished; advance the state machine."""
    # A still-pending settings/bootlogo check means the list_installed query
    # produced no matching output (nothing installed): install right away.
    if self.check_settings:
        self.check_settings = False
        self.runSettingsInstall()
        return
    if self.check_bootlogo:
        self.check_bootlogo = False
        self.runSettingsInstall()
        return
    self.remainingdata = ""
    if self.run == 0:
        # "opkg update" finished; next, query the installed plugins.
        self.run = 1
        if self.type == self.DOWNLOAD:
            self.startIpkgListInstalled()
    elif self.run == 1 and self.type == self.DOWNLOAD:
        # "list_installed" finished; build the list of downloadable plugins.
        self.run = 2
        from Components import opkg
        pluginlist = []
        self.pluginlist = pluginlist
        for plugin in opkg.enumPlugins(self.PLUGIN_PREFIX):
            # Skip -common/-meta packages, already installed packages, and
            # -po/-src packages unless enabled in the configuration.
            if not plugin[0].endswith('-common') and not plugin[0].endswith('-meta') and plugin[0] not in self.installedplugins and ((not config.pluginbrowser.po.value and not plugin[0].endswith('-po')) or config.pluginbrowser.po.value) and ((not config.pluginbrowser.src.value and not plugin[0].endswith('-src')) or config.pluginbrowser.src.value):
                # plugin[0][15:] presumably strips the "enigma2-plugin-"
                # prefix -- the slice length must match PLUGIN_PREFIX.
                pluginlist.append(plugin + (plugin[0][15:],))
        if pluginlist:
            self["text"].hide()
            pluginlist.sort()
            self.updateList()
            self["list"].instance.show()
        else:
            self["text"].setText(_("No new plugins found"))
    else:
        # REMOVE mode (or later runs): show whatever we collected.
        self["text"].hide()
        if self.pluginlist:
            self.updateList()
            self["list"].instance.show()
        else:
            if self.type == self.DOWNLOAD:
                self["text"].setText(_("Sorry the feeds are down for maintenance"))
def dataAvail(self, str):
    """Process a chunk of stdout from the opkg container process."""
    # Abort early when the feed server reported a download error.
    # BUGFIX: the original tested "('wget returned 1' or 'wget returned 255'
    # or '404 Not Found') in str", which only ever checked the first marker
    # (the parenthesized `or` chain evaluates to its first truthy operand).
    if self.type == self.DOWNLOAD and any(err in str for err in ('wget returned 1', 'wget returned 255', '404 Not Found')):
        self.run = 3
        return
    #prepend any remaining data from the previous call
    str = self.remainingdata + str
    #split in lines
    lines = str.split('\n')
    #'str' should end with '\n', so when splitting, the last line should be empty. If this is not the case, we received an incomplete line
    if len(lines[-1]):
        #remember this data for next time
        self.remainingdata = lines[-1]
        lines = lines[0:-1]
    else:
        self.remainingdata = ""
    if self.check_settings:
        # Output of the settings-* list_installed query: offer to remove the
        # already installed channel list first.
        self.check_settings = False
        self.remove_settings_name = str.split(' - ')[0].replace(self.PLUGIN_PREFIX, '')
        self.session.openWithCallback(self.runSettingsRemove, MessageBox, _('You already have a channel list installed,\nwould you like to remove\n"%s"?') % self.remove_settings_name)
        return
    if self.check_bootlogo:
        # Same dance for an already installed bootlogo.
        self.check_bootlogo = False
        self.remove_bootlogo_name = str.split(' - ')[0].replace(self.PLUGIN_PREFIX, '')
        self.session.openWithCallback(self.runBootlogoRemove, MessageBox, _('You already have a bootlogo installed,\nwould you like to remove\n"%s"?') % self.remove_bootlogo_name)
        return
    if self.run == 1:
        # Parse "name - version[ - description]" lines from list_installed.
        for x in lines:
            plugin = x.split(" - ", 2)
            # 'opkg list_installed' only returns name + version, no description field
            if len(plugin) >= 2:
                if not plugin[0].endswith('-common') and not plugin[0].endswith('-dev') and not plugin[0].endswith('-staticdev') and not plugin[0].endswith('-dbg') and not plugin[0].endswith('-doc') and not plugin[0].endswith('-meta'):
                    if plugin[0] not in self.installedplugins:
                        if self.type == self.DOWNLOAD and ((not config.pluginbrowser.po.value and not plugin[0].endswith('-po')) or config.pluginbrowser.po.value) and ((not config.pluginbrowser.src.value and not plugin[0].endswith('-src')) or config.pluginbrowser.src.value):
                            self.installedplugins.append(plugin[0])
                        else:
                            if len(plugin) == 2:
                                plugin.append('')
                            # plugin[0][15:] presumably strips the
                            # "enigma2-plugin-" prefix (len == 15).
                            plugin.append(plugin[0][15:])
                            self.pluginlist.append(plugin)
        self.pluginlist.sort()
def updateList(self):
    """Rebuild the category/plugin list widget from self.pluginlist."""
    entries = []  # renamed: the original shadowed the builtin `list`
    expandableIcon = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expandable-plugins.png"))
    expandedIcon = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expanded-plugins.png"))
    verticallineIcon = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/verticalline-plugins.png"))
    # Group plugins by the category part of their name ("category-rest").
    self.plugins = {}
    for x in self.pluginlist:
        split = x[3].split('-', 1)
        if len(split) < 2:
            continue
        # setdefault replaces the deprecated dict.has_key() check.
        self.plugins.setdefault(split[0], []).append(
            (PluginDescriptor(name=x[3], description=x[2], icon=verticallineIcon), split[1], x[1]))
    categories = self.plugins.keys()
    if config.usage.sort_pluginlist.value:
        categories = sorted(categories)
    for category in categories:
        if category in self.expanded:
            # Expanded category: header plus one row per plugin.
            entries.append(PluginCategoryComponent(category, expandedIcon, self.listWidth))
            entries.extend([PluginDownloadComponent(plugin[0], plugin[1], plugin[2], self.listWidth) for plugin in self.plugins[category]])
        else:
            entries.append(PluginCategoryComponent(category, expandableIcon, self.listWidth))
    self.list = entries
    self["list"].l.setList(entries)
| gpl-2.0 |
mgit-at/ansible | test/units/modules/network/onyx/test_onyx_pfc_interface.py | 68 | 4284 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_pfc_interface
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxPfcInterfaceModule(TestOnyxModule):
    """Unit tests for the onyx_pfc_interface module (priority flow control)."""

    module = onyx_pfc_interface

    def setUp(self):
        super(TestOnyxPfcInterfaceModule, self).setUp()
        # Controls which fixture (PFC enabled/disabled) load_fixtures() uses.
        self._pfc_enabled = True
        # Mock out device access: config read, config write and OS version.
        self.mock_get_config = patch.object(
            onyx_pfc_interface.OnyxPfcInterfaceModule,
            "_get_pfc_config")
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_version = patch.object(
            onyx_pfc_interface.OnyxPfcInterfaceModule, "_get_os_version")
        self.get_version = self.mock_get_version.start()

    def tearDown(self):
        super(TestOnyxPfcInterfaceModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_get_version.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Feed the mocked device with the matching fixture file.
        if self._pfc_enabled:
            suffix = 'enabled'
        else:
            suffix = 'disabled'
        config_file = 'onyx_pfc_interface_%s.cfg' % suffix
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None
        self.get_version.return_value = "3.6.5000"

    def _test_pfc_if(self, if_name, enabled, changed, commands):
        # Run the module against one interface and verify outcome.
        state = 'enabled' if enabled else 'disabled'
        set_module_args(dict(name=if_name, state=state))
        self.execute_module(changed=changed, commands=commands)

    def _test_pfc_no_change(self, enabled):
        # Requesting the state the device already has must be a no-op.
        interfaces = ('Eth1/1', 'Eth1/1/2', 'Po1', 'Mpo2')
        changed = False
        commands = None
        for ifc in interfaces:
            self._test_pfc_if(ifc, enabled, changed, commands)

    def test_pfc_enabled_no_change(self):
        self._pfc_enabled = True
        enabled = True
        self._test_pfc_no_change(enabled)

    def test_pfc_disabled_no_change(self):
        self._pfc_enabled = False
        enabled = False
        self._test_pfc_no_change(enabled)

    def _test_pfc_change(self, enabled):
        # Toggling PFC must emit the expected CLI command per interface type.
        cmd_list = [
            ('Eth1/1', 'interface ethernet 1/1'),
            ('Eth1/1/2', 'interface ethernet 1/1/2'),
            ('Po1', 'interface port-channel 1'),
            ('Mpo2', 'interface mlag-port-channel 2'),
        ]
        changed = True
        suffix = ' dcb priority-flow-control mode on force'
        if not enabled:
            suffix = ' no dcb priority-flow-control mode force'
        for (if_name, cmd) in cmd_list:
            commands = [cmd + suffix]
            self._test_pfc_if(if_name, enabled, changed, commands)

    def test_pfc_disabled_change(self):
        self._pfc_enabled = False
        enabled = True
        self._test_pfc_change(enabled)

    def test_pfc_enabled_change(self):
        self._pfc_enabled = True
        enabled = False
        self._test_pfc_change(enabled)

    def test_pfc_aggregate(self):
        # Several interfaces changed in a single task via `aggregate`.
        self._pfc_enabled = False
        aggregate = [dict(name='Eth1/1'), dict(name='Eth1/1/2')]
        set_module_args(dict(aggregate=aggregate, state='enabled'))
        commands = [
            'interface ethernet 1/1 dcb priority-flow-control mode on force',
            'interface ethernet 1/1/2 dcb priority-flow-control mode on force']
        self.execute_module(changed=True, commands=commands)

    def test_pfc_aggregate_purge(self):
        # With purge=True, interfaces absent from the aggregate are disabled.
        self._pfc_enabled = True
        aggregate = [dict(name='Po1'), dict(name='Mpo2')]
        set_module_args(dict(aggregate=aggregate, state='enabled', purge=True))
        commands = [
            'interface ethernet 1/1 no dcb priority-flow-control mode force',
            'interface ethernet 1/1/2 no dcb priority-flow-control mode force']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
micaiahparker/vote | voters/alignment.py | 1 | 1180 | from math import sqrt
from random import uniform
from .utils import distance, normalizer
normalize = normalizer(0, sqrt(8))
class Alignment:
    """A point on the classic two-axis (law/chaos, good/evil) alignment grid.

    state -- lawful/chaotic axis in [-1, 1] (lawful positive).
    moral -- good/evil axis in [-1, 1] (good positive).
    An omitted axis is drawn uniformly at random from [-1, 1].
    """

    def __init__(self, state=None, moral=None):
        # BUGFIX: the original used "state or uniform(-1, 1)", which
        # discards a legitimate value of 0 (falsy) and replaces it with a
        # random one; test explicitly against None instead.
        self.state = uniform(-1, 1) if state is None else state
        self.moral = uniform(-1, 1) if moral is None else moral

    @property
    def description(self):
        """Human-readable alignment, e.g. 'Lawful Good' or 'True Neutral'."""
        state, moral = self._str_state(), self._str_moral()
        # The two axis labels only coincide when both are 'Neutral'.
        if state == moral:
            return 'True Neutral'
        return '{state} {moral}'.format(state=state, moral=moral)

    def __repr__(self):
        return self.description

    def opinion(self, alignment):
        """Normalized closeness to another alignment (1.0 means identical)."""
        # NOTE(review): relies on distance()/normalize() from .utils; the
        # normalizer spans [0, sqrt(8)], the maximum grid distance.
        return normalize(distance(self.position, alignment.position))

    def _str_state(self):
        # Thresholds split [-1, 1] into three equal bands.
        if self.state <= -(1/3):
            return 'Chaotic'
        if -(1/3) < self.state < (1/3):
            return 'Neutral'
        if (1/3) <= self.state:
            return 'Lawful'

    def _str_moral(self):
        if self.moral <= -(1/3):
            return 'Evil'
        if -(1/3) < self.moral < (1/3):
            return 'Neutral'
        if (1/3) <= self.moral:
            return 'Good'

    @property
    def position(self):
        """The (state, moral) coordinate tuple."""
        return self.state, self.moral
| mit |
nickchen-mitac/fork | src/ava/web/service.py | 1 | 3040 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
from ..core import get_core_context
from ava.runtime import environ
from ava.runtime.config import settings
from .bottle import request, response, HTTPError, static_file as _static_file
from . import defines as D
logger = logging.getLogger(__name__)
static_folder = os.path.join(environ.pod_dir(), 'webroot')
def get_webfront_engine():
    """Return the webfront engine registered in the core context."""
    return get_core_context().lookup(D.WEBFRONT_CONTEXT_NAME)
def raise_unauthorized(desc=b'Authentication required.'):
    """Abort the current request with an authentication-required HTTP error.

    :param desc: byte-string description sent to the client.
    """
    raise HTTPError(D.HTTP_STATUS_AUTH_REQUIRED, desc)
def get_access_token():
    """Return the access token of the currently registered webfront engine."""
    return get_webfront_engine().access_token
def require_auth(callback):
    """Decorator: reject requests whose Authorization header does not match
    the webfront engine's access token."""
    def wrapper(*args, **kwargs):
        supplied = request.get_header('Authorization')
        if get_webfront_engine().access_token == supplied:
            return callback(*args, **kwargs)
        # Token mismatch: answer with a JSON error document.
        response.status = D.HTTP_STATUS_AUTH_REQUIRED
        response.content_type = D.JSON_CONTENT_TYPE
        return dict(status='error', reason='Authentication required.')
    return wrapper
def require_json(callback):
    """Decorator: only accept requests whose Content-Type is JSON.

    Responds with an unsupported-media-type JSON error document otherwise.
    """
    def wrapper(*args, **kwargs):
        ct = request.content_type
        logger.debug("Content-type: %s", ct)
        if ct is None:
            ct = ''
        # BUGFIX: the original discarded the result of strip().lower()
        # (strings are immutable), leaving the check case- and
        # whitespace-sensitive; keep the normalized value.
        ct = ct.strip().lower()
        if not ct.startswith('application/json'):
            logger.warning("JSON type expected, instead received: %s", ct)
            response.status = D.HTTP_STATUS_UNSUPPORTED_TYPE
            response.content_type = D.JSON_CONTENT_TYPE
            return dict(status='error', reason='Request data type is not supported.')
        body = callback(*args, **kwargs)
        return body
    return wrapper
def static_file(filepath, root=static_folder, mimetype='auto', download=False, charset='utf-8'):
    """Serve a file below the pod's webroot (thin wrapper over bottle's static_file)."""
    return _static_file(filepath, root=root, mimetype=mimetype, download=download, charset=charset)
def swap_root_app(wsgiapp):
    """ Swap the root WSGI application.

    :param wsgiapp: the new root WSGI application.
    :return: the previous WSGI application.
    """
    # Imported lazily to avoid a circular import with the webfront module.
    from .webfront import dispatcher
    old_app = dispatcher.app
    dispatcher.app = wsgiapp
    return old_app
def set_cors_headers():
    """
    Set CORS headers
    """
    # Simple GET requests: allow any origin.
    if request.method == 'GET':
        response.set_header(b'Access-Control-Allow-Origin', b'*')
        return
    # Preflight requests: advertise the supported methods and headers.
    if request.method == 'OPTIONS':
        response.set_header(b'Access-Control-Allow-Methods',
                            b'GET, PUT, HEAD, DELETE, OPTIONS')
        response.set_header(b'Access-Control-Allow-Headers',
                            b'authorization')

    client_origin = request.get_header(b'Origin', b'*')

    # for PUT and DELETE operations, echo back the given Origin header.
    response.set_header(b'Access-Control-Allow-Origin', client_origin)
# Explicit public API of this module.
__all__ = ['raise_unauthorized', 'static_file',
           'swap_root_app', 'set_cors_headers', 'get_webfront_engine',
           'get_access_token']
| apache-2.0 |
RegaliaEzz/Hexa-N9208 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window: draws scheduler traces as horizontal rectangle lanes."""

    # Pixel layout constants of the drawing area.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        # One lane (rect + spacing) per rectangle, below the top offset.
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom.
        # NOTE(review): integer division under Python 2 -- presumably intended.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> microseconds at the current zoom.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll position in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current horizontal scroll position in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one time span in lane *nr*; top_color marks an event strip."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            # Thin marking strip at the top of the lane.
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to repaint the visible time window.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible horizontal window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to a lane index; -1 when between lanes or outside."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the summary text widget below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward clicks inside a lane to the tracer as (lane, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-apply scrollbars after a zoom change, keeping time x in view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # +/- zoom; arrow keys scroll one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
malikabhi05/upm | examples/python/moisture.py | 6 | 2463 | #!/usr/bin/env python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_moisture as upmMoisture
def main():
    # Grove Moisture sensor wired to analog pin A0.
    sensor = upmMoisture.Moisture(0)

    ## Exit handlers ##
    # Suppress the stack trace normally printed when the user hits Ctrl-C.
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # Runs at interpreter exit; could call into the sensor if needed.
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Values (approximate):
    # 0-300, sensor in air or dry soil
    # 300-600, sensor in humid soil
    # 600+, sensor in wet soil or submerged in water
    # Poll once per second and report the corresponding moisture level.
    while True:
        reading = sensor.value()
        # Note: readings outside [0, 600) -- including negatives -- report "Wet",
        # matching the original threshold logic.
        if 0 <= reading < 300:
            level = "Dry"
        elif 300 <= reading < 600:
            level = "Moist"
        else:
            level = "Wet"
        print("Moisture value: {0}, {1}".format(reading, level))
        time.sleep(1)

if __name__ == '__main__':
    main()
| mit |
anryko/ansible | lib/ansible/modules/monitoring/honeybadger_deployment.py | 102 | 3646 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: honeybadger_deployment
author: "Benjamin Curtis (@stympy)"
version_added: "2.2"
short_description: Notify Honeybadger.io about app deployments
description:
- Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
repo:
description:
- URL of the project repository
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
url:
description:
- Optional URL to submit the notification to.
default: "https://api.honeybadger.io/v1/deploys"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- honeybadger_deployment:
token: AAAAAA
environment: staging
user: ansible
revision: b6826b8
repo: 'git@github.com:user/repo.git'
'''
RETURN = '''# '''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
# ===========================================
# Module execution.
#
def main():
    """Module entry point: assemble the deploy payload and POST it to Honeybadger."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Map module options onto the Honeybadger deploy form fields, skipping
    # any that were not supplied (order preserved for the encoded payload).
    field_map = (
        ('environment', 'deploy[environment]'),
        ('user', 'deploy[local_username]'),
        ('repo', 'deploy[repository]'),
        ('revision', 'deploy[revision]'),
    )
    params = {}
    for option, field in field_map:
        if module.params[option]:
            params[field] = module.params[option]
    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    try:
        data = urlencode(params)
        response, info = fetch_url(module, url, data=data)
    except Exception as e:
        module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
    else:
        if info['status'] == 201:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))


if __name__ == '__main__':
    main()
| gpl-3.0 |
mapbox/gyp | test/copies/gyptest-slash.py | 100 | 1434 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies with a trailing slash in the destination directory.
"""
import TestGyp
test = TestGyp.TestGyp()

test.run_gyp('copies-slash.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('copies-slash.gyp', chdir='relocate/src')

# Both output directories must receive identical copies of the three files,
# checked in the same order as before: file3, file4, subdir/file5 per dir.
for out_dir in ('copies-out-slash', 'copies-out-slash-2'):
    test.built_file_must_match(out_dir + '/directory/file3',
                               'file3 contents\n',
                               chdir='relocate/src')
    test.built_file_must_match(out_dir + '/directory/file4',
                               'file4 contents\n',
                               chdir='relocate/src')
    test.built_file_must_match(out_dir + '/directory/subdir/file5',
                               'file5 contents\n',
                               chdir='relocate/src')

test.pass_test()
| bsd-3-clause |
partofthething/home-assistant | tests/components/nexia/test_sensor.py | 15 | 4864 | """The sensor tests for the nexia platform."""
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from .util import async_init_integration
def _assert_sensor(hass, entity_id, expected_state, expected_attributes):
    """Assert a sensor's state value and a subset of its attributes.

    Only a subset of attributes is tested in case HA changes the
    implementation and a new one appears.
    """
    state = hass.states.get(entity_id)
    assert state.state == expected_state
    assert all(
        state.attributes[key] == expected_attributes[key] for key in expected_attributes
    )


async def test_create_sensors(hass):
    """Test creation of sensors."""
    await async_init_integration(hass)

    attribution = "Data provided by mynexia.com"

    _assert_sensor(
        hass,
        "sensor.nick_office_temperature",
        "23",
        {
            "attribution": attribution,
            "device_class": "temperature",
            "friendly_name": "Nick Office Temperature",
            "unit_of_measurement": TEMP_CELSIUS,
        },
    )
    _assert_sensor(
        hass,
        "sensor.nick_office_zone_setpoint_status",
        "Permanent Hold",
        {
            "attribution": attribution,
            "friendly_name": "Nick Office Zone Setpoint Status",
        },
    )
    _assert_sensor(
        hass,
        "sensor.nick_office_zone_status",
        "Relieving Air",
        {
            "attribution": attribution,
            "friendly_name": "Nick Office Zone Status",
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_air_cleaner_mode",
        "auto",
        {
            "attribution": attribution,
            "friendly_name": "Master Suite Air Cleaner Mode",
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_current_compressor_speed",
        "69.0",
        {
            "attribution": attribution,
            "friendly_name": "Master Suite Current Compressor Speed",
            "unit_of_measurement": PERCENTAGE,
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_outdoor_temperature",
        "30.6",
        {
            "attribution": attribution,
            "device_class": "temperature",
            "friendly_name": "Master Suite Outdoor Temperature",
            "unit_of_measurement": TEMP_CELSIUS,
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_relative_humidity",
        "52.0",
        {
            "attribution": attribution,
            "device_class": "humidity",
            "friendly_name": "Master Suite Relative Humidity",
            "unit_of_measurement": PERCENTAGE,
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_requested_compressor_speed",
        "69.0",
        {
            "attribution": attribution,
            "friendly_name": "Master Suite Requested Compressor Speed",
            "unit_of_measurement": PERCENTAGE,
        },
    )
    _assert_sensor(
        hass,
        "sensor.master_suite_system_status",
        "Cooling",
        {
            "attribution": attribution,
            "friendly_name": "Master Suite System Status",
        },
    )
| mit |
gonicus/gosa | backend/src/gosa/backend/plugins/upload/handler/workflow.py | 1 | 2661 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import logging
import os
from zipfile import ZipFile
from gosa.backend.routes.sse.main import SseHandler
from gosa.common.event import EventMaker
from lxml import objectify, etree
from pkg_resources import resource_filename
from gosa.backend.plugins.upload.main import IUploadFileHandler
from gosa.backend.components.workflowregistry import WorkflowRegistry
from gosa.common import Environment
class WorkflowUploadHandler(IUploadFileHandler):
    """Accepts uploaded workflow ZIP archives and installs them.

    The archive must contain a ``workflow.xml`` that validates against the
    bundled ``data/workflow.xsd`` schema; its ``Workflow.Id`` value selects
    the installation directory below the ``core.workflow-path`` setting.
    After extraction, the workflow registry is refreshed and a
    ``WorkflowUpdate`` event is broadcast to connected clients over SSE.
    """

    def __init__(self):
        self.env = Environment.getInstance()
        self.log = logging.getLogger(__name__)
        self.log.info("initializing workflow upload handler")

    def handle_upload(self, file, request):
        """Entry point called by the upload framework for a finished upload.

        ``file`` is the temporary upload object; ``file.f_out.name`` is the
        path of its backing file on disk, which is what gets extracted.
        """
        filename = request.headers.get('X-File-Name')
        self.log.debug("uploaded workflow file received %s" % filename)
        self.extract(file.f_out.name, filename)

    def extract(self, fn, real_name):
        """Validate and install the workflow archive located at path ``fn``.

        ``real_name`` is the original client-side file name (logging context
        only). Any unexpected error is logged with traceback and re-raised.
        """
        try:
            with ZipFile(fn) as workflow_zip:
                # testzip() returns the name of the first corrupt member,
                # i.e. any truthy result means the archive is damaged.
                if workflow_zip.testzip():
                    self.log.error("bad workflow zip uploaded")
                    return
                env = Environment.getInstance()
                schema = etree.XMLSchema(file=resource_filename("gosa.backend", "data/workflow.xsd"))
                parser = objectify.makeparser(schema=schema)
                try:
                    with workflow_zip.open('workflow.xml') as dsc:
                        root = objectify.fromstring(dsc.read(), parser)
                        # Renamed from `id` to avoid shadowing the builtin.
                        workflow_id = objectify.ObjectPath("Workflow.Id")(root)[0].text
                        target = os.path.join(env.config.get("core.workflow-path", "/var/lib/gosa/workflows"), workflow_id)
                        # NOTE(review): extractall() trusts member names from
                        # the uploaded archive; hostile names could escape
                        # `target` (zip path traversal). Consider validating
                        # member paths before extraction.
                        workflow_zip.extractall(target)
                        WorkflowRegistry.get_instance().refreshWorkflows()
                        # send the event to the clients
                        e = EventMaker()
                        ev = e.Event(e.WorkflowUpdate(
                            e.Id(workflow_id),
                            e.ChangeType("create")
                        ))
                        event_object = objectify.fromstring(etree.tostring(ev, pretty_print=True).decode('utf-8'))
                        SseHandler.notify(event_object, channel="broadcast")
                except KeyError:
                    # ZipFile.open() raises KeyError for a missing member.
                    self.log.error("bad workflow zip uploaded - no workflow.xml present")
        except Exception:
            # Log with traceback (was: print(e)) and re-raise preserving the
            # original traceback (bare `raise`, not `raise e`).
            self.log.exception("workflow extraction failed")
            raise
mluo613/osf.io | tests/test_oauth.py | 4 | 25440 | from datetime import datetime
import httplib as http
import logging
import json
import time
import urlparse
import httpretty
from nose.tools import * # noqa
import pytz
from oauthlib.oauth2 import OAuth2Error
from framework.auth import authenticate
from framework.exceptions import PermissionsError, HTTPError
from framework.sessions import session
from website.oauth.models import (
ExternalAccount,
ExternalProvider,
OAUTH1,
OAUTH2,
)
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase
from osf_tests.factories import (
AuthUserFactory,
ExternalAccountFactory,
MockOAuth2Provider,
UserFactory,
)
# Third-party OAuth libraries are chatty at lower levels; raise their
# threshold to ERROR so test output stays readable.
SILENT_LOGGERS = ['oauthlib', 'requests_oauthlib']
for logger in SILENT_LOGGERS:
    logging.getLogger(logger).setLevel(logging.ERROR)
class MockOAuth1Provider(ExternalProvider):
    """Minimal OAuth 1.0a test double for the ExternalProvider base class."""
    _oauth_version = OAUTH1
    # Static provider metadata; the mock1a.com endpoints below are stubbed
    # with httpretty in the tests, so no real network traffic occurs.
    name = "Mock OAuth 1.0a Provider"
    short_name = "mock1a"
    client_id = "mock1a_client_id"
    client_secret = "mock1a_client_secret"
    auth_url_base = "http://mock1a.com/auth"
    request_token_url = "http://mock1a.com/request"
    callback_url = "http://mock1a.com/callback"
    def handle_callback(self, response):
        # Only a stable provider_id is needed so the resulting
        # ExternalAccount can be asserted against in the tests.
        return {
            'provider_id': 'mock_provider_id'
        }
def _prepare_mock_oauth2_handshake_response(expires_in=3600):
    """Stub the OAuth2 token endpoint with a successful handshake payload.

    ``expires_in`` controls both the relative and absolute expiry fields of
    the mocked token response.
    """
    token_payload = {
        'access_token': 'mock_access_token',
        'expires_at': time.time() + expires_in,
        'expires_in': expires_in,
        'refresh_token': 'mock_refresh_token',
        'scope': ['all'],
        'token_type': 'bearer',
    }
    httpretty.register_uri(
        httpretty.POST,
        'https://mock2.com/callback',
        body=json.dumps(token_payload),
        status=200,
        content_type='application/json',
    )
def _prepare_mock_500_error():
    """Stub the OAuth2 token endpoint with a server-side failure.

    NOTE: despite the helper's name, the registered status is 503 — the
    tests that use it assert 503 as well.
    """
    httpretty.register_uri(
        httpretty.POST,
        'https://mock2.com/callback',
        status=503,
        content_type='application/json',
        body='{"error": "not found"}',
    )
def _prepare_mock_401_error():
    """Stub the OAuth2 token endpoint to reject the exchange (HTTP 401)."""
    httpretty.register_uri(
        httpretty.POST,
        'https://mock2.com/callback',
        status=401,
        content_type='application/json',
        body='{"error": "user denied access"}',
    )
class TestExternalAccount(OsfTestCase):
    # Test the ExternalAccount object and associated views.
    #
    # Functionality not specific to the OAuth version used by the
    # ExternalProvider should go here.
    def setUp(self):
        # Fresh authenticated user and mock OAuth2 provider for each test.
        super(TestExternalAccount, self).setUp()
        self.user = AuthUserFactory()
        self.provider = MockOAuth2Provider()
    def test_disconnect(self):
        # Disconnect an external account from a user
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        self.user.external_accounts.add(external_account)
        self.user.save()
        # If the external account isn't attached, this test has no meaning
        assert_equal(ExternalAccount.find().count(), 1)
        assert_in(
            external_account,
            self.user.external_accounts.all(),
        )
        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )
        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )
        self.user.reload()
        # external_account.reload()
        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts.all(),
        )
        # External account is still in the database
        # (disconnect only unlinks; the account row itself is preserved)
        assert_equal(ExternalAccount.find().count(), 1)
    def test_disconnect_with_multiple_connected(self):
        # Disconnect an account connected to multiple users from one user
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        self.user.external_accounts.add(external_account)
        self.user.save()
        other_user = UserFactory()
        other_user.external_accounts.add(external_account)
        other_user.save()
        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )
        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )
        self.user.reload()
        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts.all(),
        )
        # External account is still in the database
        assert_equal(ExternalAccount.find().count(), 1)
        other_user.reload()
        # External account is still associated with the other user
        assert_in(
            external_account,
            other_user.external_accounts.all(),
        )
class TestExternalProviderOAuth1(OsfTestCase):
    # Test functionality of the ExternalProvider class, for OAuth 1.0a
    def setUp(self):
        super(TestExternalProviderOAuth1, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth1Provider()
    @httpretty.activate
    def test_start_flow(self):
        # Request temporary credentials from provider, provide auth redirect
        httpretty.register_uri(httpretty.POST, 'http://mock1a.com/request',
                               body='{"oauth_token_secret": "temp_secret", '
                                    '"oauth_token": "temp_token", '
                                    '"oauth_callback_confirmed": "true"}',
                               status=200,
                               content_type='application/json')
        with self.app.app.test_request_context('/oauth/connect/mock1a/'):
            # make sure the user is logged in
            authenticate(user=self.user, access_token=None, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            # The URL to which the user would be redirected
            assert_equal(url, "http://mock1a.com/auth?oauth_token=temp_token")
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_equal(creds['token'], 'temp_token')
            assert_equal(creds['secret'], 'temp_secret')
    @httpretty.activate
    def test_callback(self):
        # Exchange temporary credentials for permanent credentials
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        httpretty.register_uri(
            httpretty.POST,
            'http://mock1a.com/callback',
            body=(
                'oauth_token=perm_token'
                '&oauth_token_secret=perm_secret'
                '&oauth_callback_confirmed=true'
            ),
        )
        user = UserFactory()
        # Fake a request context for the callback
        ctx = self.app.app.test_request_context(
            path='/oauth/callback/mock1a/',
            query_string='oauth_token=temp_key&oauth_verifier=mock_verifier',
        )
        with ctx:
            # make sure the user is logged in
            authenticate(user=user, access_token=None, response=None)
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'token': 'temp_key',
                    'secret': 'temp_secret',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        # Assumes this is the only ExternalAccount in the test database,
        # created as a side effect of auth_callback above.
        account = ExternalAccount.objects.first()
        assert_equal(account.oauth_key, 'perm_token')
        assert_equal(account.oauth_secret, 'perm_secret')
        assert_equal(account.provider_id, 'mock_provider_id')
        assert_equal(account.provider_name, 'Mock OAuth 1.0a Provider')
    @httpretty.activate
    def test_callback_wrong_user(self):
        # Reject temporary credentials not assigned to the user
        #
        # This prohibits users from associating their external account with
        # another user's OSF account by using XSS or similar attack vector to
        # complete the OAuth flow using the logged-in user but their own account
        # on the external service.
        #
        # If the OSF were to allow login via OAuth with the provider in question,
        # this would allow attackers to hijack OSF accounts with a simple script
        # injection.
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        httpretty.register_uri(
            httpretty.POST,
            'http://mock1a.com/callback',
            body='oauth_token=perm_token'
                 '&oauth_token_secret=perm_secret'
                 '&oauth_callback_confirmed=true',
        )
        user = UserFactory()
        account = ExternalAccountFactory(
            provider="mock1a",
            provider_name='Mock 1A',
            oauth_key="temp_key",
            oauth_secret="temp_secret"
        )
        account.save()
        # associate this ExternalAccount instance with the user
        user.external_accounts.add(account)
        user.save()
        malicious_user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock1a/",
            query_string="oauth_token=temp_key&oauth_verifier=mock_verifier"
        ):
            # make sure the user is logged in
            authenticate(user=malicious_user, access_token=None, response=None)
            # The temp credentials belong to `user`, not `malicious_user`,
            # so completing the flow must be rejected.
            with assert_raises(PermissionsError):
                # do the key exchange
                self.provider.auth_callback(user=malicious_user)
class TestExternalProviderOAuth2(OsfTestCase):
    # Test functionality of the ExternalProvider class, for OAuth 2.0
    def setUp(self):
        super(TestExternalProviderOAuth2, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth2Provider()
    def test_oauth_version_default(self):
        # OAuth 2.0 is the default version
        assert_is(self.provider._oauth_version, OAUTH2)
    def test_start_flow(self):
        # Generate the appropriate URL and state token
        with self.app.app.test_request_context("/oauth/connect/mock2/"):
            # make sure the user is logged in
            authenticate(user=self.user, access_token=None, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_in('state', creds)
            # The URL to which the user would be redirected
            parsed = urlparse.urlparse(url)
            params = urlparse.parse_qs(parsed.query)
            # check parameters
            assert_equal(
                params,
                {
                    'state': [creds['state']],
                    'response_type': ['code'],
                    'client_id': [self.provider.client_id],
                    'redirect_uri': [
                        web_url_for('oauth_callback',
                                    service_name=self.provider.short_name,
                                    _absolute=True)
                    ]
                }
            )
            # check base URL
            assert_equal(
                url.split("?")[0],
                "https://mock2.com/auth",
            )
    @httpretty.activate
    def test_callback(self):
        # Exchange temporary credentials for permanent credentials
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ):
            # make sure the user is logged in
            # NOTE(review): this authenticates self.user but completes the
            # callback for the freshly created `user` below; the OAuth1
            # equivalent authenticates `user` itself - confirm the mismatch
            # is intentional.
            authenticate(user=self.user, access_token=None, response=None)
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        account = ExternalAccount.objects.first()
        assert_equal(account.oauth_key, 'mock_access_token')
        assert_equal(account.provider_id, 'mock_provider_id')
    @httpretty.activate
    def test_provider_down(self):
        # Create a 500 error
        _prepare_mock_500_error()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ):
            # make sure the user is logged in
            authenticate(user=user, access_token=None, response=None)
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            with assert_raises(HTTPError) as error_raised:
                self.provider.auth_callback(user=user)
            # _prepare_mock_500_error actually registers a 503 response.
            assert_equal(
                error_raised.exception.code,
                503,
            )
    @httpretty.activate
    def test_user_denies_access(self):
        # Create a 401 error
        _prepare_mock_401_error()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="error=mock_error&code=mock_code&state=mock_state"
        ):
            # make sure the user is logged in
            authenticate(user=user, access_token=None, response=None)
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # A denied grant is reported as a falsy return, not an exception.
            assert_false(self.provider.auth_callback(user=user))
    @httpretty.activate
    def test_multiple_users_associated(self):
        # Create only one ExternalAccount for multiple OSF users
        #
        # For some providers (ex: GitHub), the act of completing the OAuth flow
        # revokes previously generated credentials. In addition, there is often no
        # way to know the user's id on the external service until after the flow
        # has completed.
        #
        # Having only one ExternalAccount instance per account on the external
        # service means that connecting subsequent OSF users to the same external
        # account will not invalidate the credentials used by the OSF for users
        # already associated.
        user_a = UserFactory()
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
        )
        user_a.external_accounts.add(external_account)
        user_a.save()
        user_b = UserFactory()
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=user_b, access_token=None, response=None)
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user_b)
        user_a.reload()
        user_b.reload()
        external_account.reload()
        # Both users point at the same ExternalAccount row.
        assert_equal(
            list(user_a.external_accounts.values_list('pk', flat=True)),
            list(user_b.external_accounts.values_list('pk', flat=True)),
        )
        assert_equal(
            ExternalAccount.find().count(),
            1
        )
    @httpretty.activate
    def test_force_refresh_oauth_key(self):
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc)
        )
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'access_token': 'refreshed_access_token',
                'expires_in': 3600,
                'refresh_token': 'refreshed_refresh_token'
            })
        )
        old_expiry = external_account.expires_at
        self.provider.account = external_account
        self.provider.refresh_oauth_key(force=True)
        external_account.reload()
        assert_equal(external_account.oauth_key, 'refreshed_access_token')
        assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
        assert_not_equal(external_account.expires_at, old_expiry)
        assert_true(external_account.expires_at > old_expiry)
    @httpretty.activate
    def test_does_need_refresh(self):
        # Same as test_force_refresh_oauth_key, but the refresh happens
        # without force because the credentials are already expired.
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc),
        )
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'access_token': 'refreshed_access_token',
                'expires_in': 3600,
                'refresh_token': 'refreshed_refresh_token'
            })
        )
        old_expiry = external_account.expires_at
        self.provider.account = external_account
        self.provider.refresh_oauth_key(force=False)
        external_account.reload()
        assert_equal(external_account.oauth_key, 'refreshed_access_token')
        assert_equal(external_account.refresh_token, 'refreshed_refresh_token')
        assert_not_equal(external_account.expires_at, old_expiry)
        assert_true(external_account.expires_at > old_expiry)
    @httpretty.activate
    def test_does_not_need_refresh(self):
        self.provider.refresh_time = 1
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            refresh_token='old_refresh',
            expires_at=datetime.utcfromtimestamp(time.time() + 200).replace(tzinfo=pytz.utc),
        )
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'err_msg': 'Should not be hit'
            }),
            status=500
        )
        # .reload() has the side effect of rounding the microsends down to 3 significant figures
        # (e.g. DT(YMDHMS, 365420) becomes DT(YMDHMS, 365000)),
        # but must occur after possible refresh to reload tokens.
        # Doing so before allows the `old_expiry == EA.expires_at` comparison to work.
        external_account.reload()
        old_expiry = external_account.expires_at
        self.provider.account = external_account
        self.provider.refresh_oauth_key(force=False)
        external_account.reload()
        # No refresh happened: credentials are untouched.
        assert_equal(external_account.oauth_key, 'old_key')
        assert_equal(external_account.refresh_token, 'old_refresh')
        assert_equal(external_account.expires_at, old_expiry)
    @httpretty.activate
    def test_refresh_oauth_key_does_not_need_refresh(self):
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() + 9999).replace(tzinfo=pytz.utc)
        )
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'err_msg': 'Should not be hit'
            }),
            status=500
        )
        self.provider.account = external_account
        ret = self.provider.refresh_oauth_key(force=False)
        assert_false(ret)
    @httpretty.activate
    def test_refresh_with_broken_provider(self):
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() - 200).replace(tzinfo=pytz.utc)
        )
        # A provider with no client credentials cannot refresh.
        self.provider.client_id = None
        self.provider.client_secret = None
        self.provider.account = external_account
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'err_msg': 'Should not be hit'
            }),
            status=500
        )
        ret = self.provider.refresh_oauth_key(force=False)
        assert_false(ret)
    @httpretty.activate
    def test_refresh_without_account_or_refresh_url(self):
        # NOTE(review): unlike the other refresh tests, self.provider.account
        # is never assigned here - presumably the refresh bails out because
        # the provider has no account attached; confirm that is the intent.
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() + 200).replace(tzinfo=pytz.utc)
        )
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'err_msg': 'Should not be hit'
            }),
            status=500
        )
        ret = self.provider.refresh_oauth_key(force=False)
        assert_false(ret)
    @httpretty.activate
    def test_refresh_with_expired_credentials(self):
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() - 10000).replace(tzinfo=pytz.utc)  # Causes has_expired_credentials to be True
        )
        self.provider.account = external_account
        # mock a successful call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'err': 'Should not be hit'
            }),
            status=500
        )
        ret = self.provider.refresh_oauth_key(force=False)
        assert_false(ret)
    @httpretty.activate
    def test_force_refresh_with_expired_credentials(self):
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
            provider_name='Mock Provider',
            oauth_key='old_key',
            oauth_secret='old_secret',
            expires_at=datetime.utcfromtimestamp(time.time() - 10000).replace(tzinfo=pytz.utc)  # Causes has_expired_credentials to be True
        )
        self.provider.account = external_account
        # mock a failing call to the provider to refresh tokens
        httpretty.register_uri(
            httpretty.POST,
            self.provider.auto_refresh_url,
            body=json.dumps({
                'error': 'invalid_grant',
            }),
            status=401
        )
        # Forcing a refresh with long-expired credentials surfaces the
        # provider's OAuth2 error instead of silently returning False.
        with assert_raises(OAuth2Error):
            ret = self.provider.refresh_oauth_key(force=True)
| apache-2.0 |
simartin/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/skipping.py | 3 | 6605 | # -*- coding: utf-8 -*-
""" support for skip/xfail functions and markers. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
def pytest_addoption(parser):
    """Register the ``--runxfail`` flag and the ``xfail_strict`` ini option."""
    general = parser.getgroup("general")
    general.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="report the results of xfail tests as if they were not marked",
    )
    strict_help = (
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)"
    )
    parser.addini("xfail_strict", strict_help, default=False, type="bool")
def pytest_configure(config):
    # With --runxfail, pytest.xfail must become inert so marked tests run
    # normally; this is done by monkeypatching the public API.
    if config.option.runxfail:
        # Capture the real implementation *before* replacing it, and restore
        # it on config cleanup so other test sessions are unaffected.
        import pytest
        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))
        def nop(*args, **kwargs):
            # No-op stand-in for pytest.xfail under --runxfail.
            pass
        # Keep the Exception attribute so `except pytest.xfail.Exception`
        # in user code keeps working.
        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)
    # Register marker documentation shown by `pytest --markers`.
    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. see "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    # _skipped_by_mark lets the report hook relocate skip locations later.
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())
    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        # A skip mark's reason may be given as a kwarg, a positional arg,
        # or omitted entirely.
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")
    # Stash the xfail evaluator for the call/report phases.
    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    """Wrap the test call: enforce xfail(run=False) before, strict xfail after."""
    # Before the test body runs: honour xfail(run=False).
    check_xfail_no_run(pyfuncitem)
    result = yield
    # Only a call that raised nothing can be an unexpected (strict) XPASS.
    if result.excinfo is None:
        check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
    """Abort the test with an xfail outcome when it is marked xfail(run=False)."""
    if item.config.option.runxfail:
        # --runxfail disables all xfail processing.
        return
    evaluator = item._evalxfail
    if evaluator.istrue() and not evaluator.get("run", True):
        xfail("[NOTRUN] " + evaluator.getexplanation())
def check_strict_xfail(pyfuncitem):
    """Fail a PASSING test that carries a strict xfail marker ([XPASS(strict)])."""
    evaluator = pyfuncitem._evalxfail
    if not evaluator.istrue():
        return
    default_strict = pyfuncitem.config.getini("xfail_strict")
    if evaluator.get("strict", default_strict):
        # Drop the evaluator first so later report generation does not treat
        # the failure raised below as a regular xfail.
        del pyfuncitem._evalxfail
        fail("[XPASS(strict)] " + evaluator.getexplanation(), pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Post-process the test report to translate xfail/skip state into the
    # final outcome. Branch order matters: unittest's unexpected-success,
    # then --runxfail, then xfail exceptions, then xfail marks, then skips.
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure
        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
        rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere: --runxfail means xfail marks are ignored
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        # An imperative pytest.xfail() call inside the test.
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            # Test raised: either the exception matches `raises` (xfail) or
            # it does not (a genuine failure).
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            # Test passed despite an xfail mark: XPASS, strict or not.
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        getattr(item, "_skipped_by_mark", False)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        # (the first unpacking only extracts `reason`; filename/line are
        # then overwritten from the item's own location).
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    """Map xfail-annotated reports onto their terminal status triplets."""
    if not hasattr(report, "wasxfail"):
        # Not xfail-related; let other plugins decide.
        return None
    if report.skipped:
        return ("xfailed", "x", "XFAIL")
    if report.passed:
        return ("xpassed", "X", "XPASS")
    return None
| mpl-2.0 |
gameduell/duell | pylib/werkzeug/contrib/limiter.py | 365 | 1334 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into the memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
    """Caps the request body exposed to the wrapped WSGI application.

    Useful in front of applications (e.g. Django, Trac) that read the whole
    request body into memory: the input stream is wrapped in a
    :class:`LimitedStream` so that at most ``maximum_size`` bytes
    (default 10MB) can ever be read from it.

    .. versionchanged:: 0.9
        Deprecated middleware.
    """
    def __init__(self, app, maximum_size=1024 * 1024 * 10):
        warn(DeprecationWarning('This middleware is deprecated'))
        self.app = app
        self.maximum_size = maximum_size
    def __call__(self, environ, start_response):
        # Never trust the declared Content-Length beyond our own cap.
        declared = int(environ.get('CONTENT_LENGTH') or 0)
        capped = min(self.maximum_size, declared)
        environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], capped)
        return self.app(environ, start_response)
| bsd-2-clause |
aosp-hybris/android_kernel_goldfish | tools/perf/scripts/python/net_dropmonitor.py | 1258 | 1562 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# drop_log maps str(drop location address) -> number of dropped packets seen.
drop_log = {}
# kallsyms holds {'loc': int_address, 'name': symbol} entries, sorted by sort().
kallsyms = []
def get_kallsyms_table():
	# Populate the global kallsyms list from /proc/kallsyms, printing a
	# progress counter while parsing. Silently returns if the file cannot
	# be opened (e.g. insufficient permissions).
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
		# First pass only counts lines so progress can be reported.
		linecount = 0
		for line in f:
			linecount = linecount+1
		f.seek(0)
	except:
		return
	j = 0
	for line in f:
		# Each line: "<hex address> <type> <symbol name> [module]".
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		j = j +1
		if ((j % 100) == 0):
			print "\r" + str(j) + "/" + str(linecount),
		kallsyms.append({ 'loc': loc, 'name' : name})
	print "\r" + str(j) + "/" + str(linecount)
	# NOTE(review): this sorts a list of dicts; under Python 2 dict
	# comparison is not keyed on 'loc' specifically - confirm the intended
	# ordering (get_sym() relies on ascending 'loc').
	kallsyms.sort()
	return
def get_sym(sloc):
	"""Resolve an address to (symbol_name, offset), or (None, 0) if unknown."""
	loc = int(sloc)
	# Walk the table from the highest address downwards; the first symbol
	# whose start address is <= loc is the containing symbol.
	for entry in reversed(kallsyms):
		if loc >= entry['loc']:
			return (entry['name'], loc - entry['loc'])
	return (None, 0)
def print_drop_table():
	# Dump the accumulated drop counts, resolving each recorded address to
	# a symbol+offset where possible (falling back to the raw address).
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf hook: called once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf hook: called once when tracing stops; symbol resolution is
	# deferred to here so the kallsyms parse cost is paid only once.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	"""perf callback for the skb:kfree_skb tracepoint.

	Counts one packet drop per event, keyed by the (stringified) drop
	location address; locations are resolved to symbols at trace_end().
	"""
	slocation = str(location)
	# dict.get() replaces the previous bare 'except:' counter, which would
	# also have swallowed unrelated exceptions (e.g. KeyboardInterrupt).
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
chromium/chromium | ui/webui/resources/PRESUBMIT.py | 4 | 3893 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Tells Chromium's presubmit machinery to run this file under Python 3.
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point for `git cl upload`; delegates to the shared checks.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point for commit/land; same checks as on upload.
  return _CommonChecks(input_api, output_api)
def _CheckForTranslations(input_api, output_api):
shared_keywords = ['i18n(']
html_keywords = shared_keywords + ['$118n{']
js_keywords = shared_keywords + ['I18nBehavior', 'loadTimeData.get']
errors = []
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
# Allow translation in i18n_behavior.js.
if local_path.endswith('i18n_behavior.js'):
continue
# Allow translation in the cr_components directory.
if 'cr_components' in local_path:
continue
keywords = None
if local_path.endswith('.js'):
keywords = js_keywords
elif local_path.endswith('.html'):
keywords = html_keywords
if not keywords:
continue
for lnum, line in f.ChangedContents():
if any(line for keyword in keywords if keyword in line):
errors.append("%s:%d\n%s" % (f.LocalPath(), lnum, line))
if not errors:
return []
return [output_api.PresubmitError("\n".join(errors) + """
Don't embed translations directly in shared UI code. Instead, inject your
translation from the place using the shared code. For an example: see
<cr-dialog>#closeText (http://bit.ly/2eLEsqh).""")]
def _CheckSvgsOptimized(input_api, output_api):
  # Runs the svgo presubmit to verify changed SVGs are optimized.
  results = []
  try:
    import sys
    # Temporarily extend sys.path so tools/resources/svgo_presubmit.py is
    # importable from this directory; the original path is restored below.
    old_sys_path = sys.path[:]
    cwd = input_api.PresubmitLocalPath()
    sys.path += [input_api.os_path.join(cwd, '..', '..', '..', 'tools')]
    from resources import svgo_presubmit
    results += svgo_presubmit.CheckOptimized(input_api, output_api)
  finally:
    sys.path = old_sys_path
  return results
def _CheckWebDevStyle(input_api, output_api):
  # Runs the shared web-dev style checks from tools/web_dev_style.
  results = []
  try:
    import sys
    # Temporarily extend sys.path to import the tools package; restored in
    # the finally block so other presubmits are unaffected.
    old_sys_path = sys.path[:]
    cwd = input_api.PresubmitLocalPath()
    sys.path += [input_api.os_path.join(cwd, '..', '..', '..', 'tools')]
    from web_dev_style import presubmit_support
    results += presubmit_support.CheckStyle(input_api, output_api)
  finally:
    sys.path = old_sys_path
  return results
def _CheckJsModulizer(input_api, output_api):
affected = input_api.AffectedFiles()
affected_files = [input_api.os_path.basename(f.LocalPath()) for f in affected]
results = []
if 'js_modulizer.py' in affected_files:
presubmit_path = input_api.PresubmitLocalPath()
sources = [input_api.os_path.join('tools', 'js_modulizer_test.py')]
tests = [input_api.os_path.join(presubmit_path, s) for s in sources]
results += input_api.canned_checks.RunUnitTests(
input_api, output_api, tests)
return results
def _CheckGenerateGrd(input_api, output_api):
affected = input_api.AffectedFiles()
affected_files = [input_api.os_path.basename(f.LocalPath()) for f in affected]
results = []
if 'generate_grd.py' in affected_files:
presubmit_path = input_api.PresubmitLocalPath()
sources = [input_api.os_path.join('tools', 'generate_grd_test.py')]
tests = [input_api.os_path.join(presubmit_path, s) for s in sources]
results += input_api.canned_checks.RunUnitTests(
input_api, output_api, tests)
return results
def _CommonChecks(input_api, output_api):
  """Checks shared between the upload and commit presubmit hooks."""
  # Order matters only for report readability; each check is independent.
  checks = [
      _CheckForTranslations,
      _CheckSvgsOptimized,
      _CheckWebDevStyle,
      _CheckJsModulizer,
      _CheckGenerateGrd,
  ]
  results = []
  for check in checks:
    results += check(input_api, output_api)
  results += input_api.canned_checks.CheckPatchFormatted(
      input_api, output_api, check_js=True)
  return results
| bsd-3-clause |
noba3/KoTos | addons/plugin.video.mega/resources/lib/platform_libraries/Linux/arm/Crypto/SelfTest/Random/OSRNG/__init__.py | 118 | 2082 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/OSRNG/__init__.py: Self-test for OSRNG modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for Crypto.Random.OSRNG package"""
__revision__ = "$Id$"
import os
def get_tests(config={}):
    """Collect the OSRNG self-test suites that apply to this platform.

    Windows gets the NT/winrandom suites, POSIX the posix suite; the
    fallback and generic suites run wherever os.urandom is available.
    """
    tests = []
    if os.name == 'nt':
        from Crypto.SelfTest.Random.OSRNG import test_nt
        tests += test_nt.get_tests(config=config)
        from Crypto.SelfTest.Random.OSRNG import test_winrandom
        tests += test_winrandom.get_tests(config=config)
    elif os.name == 'posix':
        from Crypto.SelfTest.Random.OSRNG import test_posix
        tests += test_posix.get_tests(config=config)
    if hasattr(os, 'urandom'):
        from Crypto.SelfTest.Random.OSRNG import test_fallback
        tests += test_fallback.get_tests(config=config)
        from Crypto.SelfTest.Random.OSRNG import test_generic
        tests += test_generic.get_tests(config=config)
    return tests
if __name__ == '__main__':
    # Allow running the self-tests directly. 'suite' must be a callable:
    # unittest.main resolves defaultTest='suite' by name and calls it.
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
jinxiaoye1987/RyzomCore | ryzom/tools/build_gamedata/workspace/common/outgame/process.py | 3 | 2951 | #!/usr/bin/python
#
# \file config.py
# \brief Process configuration
# \date 2010-08-27 17:02GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Process configuration.
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# *** PROCESS CONFIGURATION ***

# *** PROCESS CONFIG ***
# Ordered list of build-pipeline steps to run for this ecosystem.
ProcessToComplete = [ ]
ProcessToComplete += [ "properties" ]
ProcessToComplete += [ "shape" ]
ProcessToComplete += [ "map" ]
ProcessToComplete += [ "ig" ]
ProcessToComplete += [ "ig_light" ]

# *** COMMON NAMES AND PATHS ***
# The outgame ecosystem doubles as its own continent/common namespace.
EcosystemName = "outgame"
EcosystemPath = "common/" + EcosystemName
ContinentName = EcosystemName
ContinentPath = EcosystemPath
CommonName = ContinentName
CommonPath = ContinentPath

# *** SHAPE EXPORT OPTIONS ***
# Compute lightmaps ?
ShapeExportOptExportLighting = "true"
# Cast shadow in lightmap ?
ShapeExportOptShadow = "true"
# Lighting limits. 0 : normal, 1 : soft shadows
ShapeExportOptLightingLimit = 0
# Lightmap lumel size
ShapeExportOptLumelSize = "0.25"
# Oversampling value. Can be 1, 2, 4 or 8
ShapeExportOptOversampling = 1
# Should the lightmap be generated in 8-bit format ?
ShapeExportOpt8BitsLightmap = "false"
# Should the lightmap export generate logs ?
ShapeExportOptLightmapLog = "true"
# Coarse mesh texture mul size
TextureMulSizeValue = "1.5"
DoBuildShadowSkin = 0
ClodConfigFile = ""

# *** COARSE MESH TEXTURE NAME ***
CoarseMeshTextureNames = [ ]

# *** POSTFIX USED BY THE MULTIPLE TILES SYSTEM ***
# Seasonal texture suffixes: spring, summer, autumn, winter.
MultipleTilesPostfix = [ ]
MultipleTilesPostfix += [ "_sp" ]
MultipleTilesPostfix += [ "_su" ]
MultipleTilesPostfix += [ "_au" ]
MultipleTilesPostfix += [ "_wi" ]
# Name of the tilebank to use
BankTileBankName = ""

# *** LANDSCAPE NAME ***
LandscapeName = ""

# *** LIGO OPTIONS ***
LigoExportLand = ""
LigoExportOnePass = 0
LigoExportColormap = "colormap_invalid.png"
LigoExportHeightmap1 = "big_invalid.png"
LigoExportZFactor1 = "1.0"
LigoExportHeightmap2 = "noise_invalid.png"
LigoExportZFactor2 = "0.5"
LigoTileBankFile = ""

# *** MAPS OPTIONS ***
ReduceBitmapFactor = 0
# list all panoply files
MapPanoplyFileList = None
# name of the .hlsbank to build.
MapHlsBankFileName = None

# *** ANIMATIONS OPTIONS ***
DoOptimizeAnimations = 0
| agpl-3.0 |
ARudiuk/mne-python | tutorials/plot_artifacts_correction_ssp.py | 5 | 2983 | """
.. _tut_artifacts_correct_ssp:
Artifact Correction with SSP
============================
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# Keep only the channel types this tutorial needs.
raw.pick_types(meg=True, ecg=True, eog=True, stim=True)

##############################################################################
# Compute SSP projections
# -----------------------

# ECG projections; the last two entries are the magnetometer/gradiometer pair.
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, average=True)
print(projs)
ecg_projs = projs[-2:]
mne.viz.plot_projs_topomap(ecg_projs)

# Now for EOG
projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, average=True)
print(projs)
eog_projs = projs[-2:]
mne.viz.plot_projs_topomap(eog_projs)

##############################################################################
# Apply SSP projections
# ---------------------
#
# MNE is handling projections at the level of the info,
# so to register them populate the list that you find in the 'proj' field
raw.info['projs'] += eog_projs + ecg_projs

##############################################################################
# Yes this was it. Now MNE will apply the projs on demand at any later stage,
# so watch out for proj parameters in functions, or apply them explicitly
# with the ``.apply_proj`` method

##############################################################################
# Demonstrate SSP cleaning on some evoked data
# --------------------------------------------
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# this can be highly data dependent
event_id = {'auditory/left': 1}
# Compare the same epochs with and without the projectors applied.
epochs_no_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
                            proj=False, baseline=(None, 0), reject=reject)
epochs_no_proj.average().plot(spatial_colors=True)
epochs_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=True,
                         baseline=(None, 0), reject=reject)
epochs_proj.average().plot(spatial_colors=True)

##############################################################################
# Looks cool right? It is however often not clear how many components you
# should take and unfortunately this can have bad consequences as can be seen
# interactively using the delayed SSP mode:
evoked = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
                    proj='delayed', baseline=(None, 0),
                    reject=reject).average()
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)
evoked.plot_topomap(times, proj='interactive')

##############################################################################
# now you should see checkboxes. Remove a few SSP and see how the auditory
# pattern suddenly drops off
| bsd-3-clause |
jaywink/social-federation | federation/tests/hostmeta/django/test_generators.py | 2 | 4453 | import json
from unittest.mock import patch, Mock
from django.test import RequestFactory
from federation.hostmeta.django import rfc7033_webfinger_view
from federation.hostmeta.django.generators import nodeinfo2_view
from federation.utils.django import get_function_from_config
from federation.tests.fixtures.hostmeta import NODEINFO2_10_DOC
def test_get_function_from_config():
    """The configured profile getter should resolve to a callable."""
    func = get_function_from_config("get_profile_function")
    assert callable(func)
class TestNodeInfo2View:
    """Tests for the ``/.well-known/x-nodeinfo2`` view."""

    @staticmethod
    def _get_response():
        """Issue a GET against the nodeinfo2 endpoint and return the response."""
        request = RequestFactory().get('/.well-known/x-nodeinfo2')
        return nodeinfo2_view(request)

    def test_returns_400_if_not_configured(self):
        assert self._get_response().status_code == 400

    @patch("federation.hostmeta.django.generators.get_function_from_config")
    def test_returns_200(self, mock_config_func):
        mock_config_func.return_value = Mock(return_value=json.loads(NODEINFO2_10_DOC))
        assert self._get_response().status_code == 200
class TestRFC7033WebfingerView:
    """Tests for the RFC 7033 webfinger endpoint view."""

    @patch("federation.hostmeta.django.generators.get_function_from_config")
    def test_handle_lowercased(self, mock_get_func):
        """The handle extracted from the resource must be lowercased."""
        mock_get_profile = Mock(side_effect=Exception)
        mock_get_func.return_value = mock_get_profile
        request = RequestFactory().get("/.well-known/webfinger?resource=acct:Foobar@example.com")
        try:
            rfc7033_webfinger_view(request)
        except Exception:
            pass
        mock_get_profile.assert_called_once_with(handle='foobar@example.com', request=request)

    def test_no_resource_returns_bad_request(self):
        """A request without a ?resource parameter yields HTTP 400."""
        request = RequestFactory().get("/.well-known/webfinger")
        response = rfc7033_webfinger_view(request)
        assert response.status_code == 400

    def test_invalid_resource_returns_bad_request(self):
        """A resource that is not an acct: URI yields HTTP 400."""
        request = RequestFactory().get("/.well-known/webfinger?resource=foobar")
        response = rfc7033_webfinger_view(request)
        assert response.status_code == 400

    @patch("federation.hostmeta.django.generators.get_function_from_config")
    def test_unknown_handle_returns_not_found(self, mock_get_func):
        """An exception from the profile getter maps to HTTP 404."""
        mock_get_func.return_value = Mock(side_effect=Exception)
        request = RequestFactory().get("/.well-known/webfinger?resource=acct:foobar@domain.tld")
        response = rfc7033_webfinger_view(request)
        assert response.status_code == 404

    def test_rendered_webfinger_returned(self):
        """A known handle renders the full JRD document with the right content type."""
        request = RequestFactory().get("/.well-known/webfinger?resource=acct:foobar@example.com")
        response = rfc7033_webfinger_view(request)
        assert response.status_code == 200
        assert response['Content-Type'] == "application/jrd+json"
        assert json.loads(response.content.decode("utf-8")) == {
            "subject": "acct:foobar@example.com",
            "aliases": [
                "https://example.com/profile/1234/",
                "https://example.com/p/1234/",
            ],
            "links": [
                {
                    "rel": "http://microformats.org/profile/hcard",
                    "type": "text/html",
                    "href": "https://example.com/hcard/users/1234",
                },
                {
                    "rel": "http://joindiaspora.com/seed_location",
                    "type": "text/html",
                    "href": "https://example.com",
                },
                {
                    "rel": "http://webfinger.net/rel/profile-page",
                    "type": "text/html",
                    "href": "https://example.com/profile/1234/",
                },
                {
                    "rel": "salmon",
                    "href": "https://example.com/receive/users/1234",
                },
                {
                    "rel": "self",
                    "href": "https://example.com/p/1234/",
                    "type": "application/activity+json",
                },
                {
                    "rel": "http://schemas.google.com/g/2010#updates-from",
                    "type": "application/atom+xml",
                    "href": "https://example.com/profile/1234/atom.xml",
                },
                {
                    "rel": "http://ostatus.org/schema/1.0/subscribe",
                    "template": "https://example.com/search?q={uri}",
                },
            ],
        }
| bsd-3-clause |
ktdreyer/teuthology | teuthology/test/test_schedule.py | 10 | 1212 | from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
    """Tests for schedule.build_config()."""

    # Shared CLI-style argument fixture. Class-level, so tests must never
    # mutate it in place (see test_owner).
    basic_args = {
        '--verbose': False,
        '--owner': 'OWNER',
        '--description': 'DESC',
        '--email': 'EMAIL',
        '--last-in-suite': True,
        '--name': 'NAME',
        '--worker': 'tala',
        '--timeout': '6',
        '--priority': '99',
        # TODO: make this work regardless of $PWD
        #'<conf_file>': ['../../examples/3node_ceph.yaml',
        #                '../../examples/3node_rgw.yaml'],
    }

    def test_basic(self):
        """Each CLI argument maps onto the expected job-dict key."""
        expected = {
            'description': 'DESC',
            'email': 'EMAIL',
            'last_in_suite': True,
            'machine_type': 'tala',
            'name': 'NAME',
            'owner': 'OWNER',
            'priority': 99,
            'results_timeout': '6',
            'verbose': False,
            'tube': 'tala',
        }
        job_dict = build_config(self.basic_args)
        assert job_dict == expected

    def test_owner(self):
        """A missing --owner falls back to 'scheduled_<current user>'."""
        # Work on a copy: mutating the class-level basic_args in place would
        # leak into any test that runs afterwards (order-dependent failures).
        args = dict(self.basic_args)
        args['--owner'] = None
        job_dict = build_config(args)
        assert job_dict['owner'] == 'scheduled_%s' % get_user()
| mit |
kuiwei/edx-platform | lms/djangoapps/shoppingcart/processors/CyberSource2.py | 8 | 24318 | """
Implementation of the CyberSource credit card processor using the newer "Secure Acceptance API".
The previous Hosted Order Page API is being deprecated as of 9/14.
For now, we're keeping the older implementation in the code-base so we can
quickly roll-back by updating the configuration. Eventually, we should replace
the original implementation with this version.
To enable this implementation, add the following Django settings:
CC_PROCESSOR_NAME = "CyberSource2"
CC_PROCESSOR = {
"CyberSource2": {
"SECRET_KEY": "<secret key>",
"ACCESS_KEY": "<access key>",
"PROFILE_ID": "<profile ID>",
"PURCHASE_ENDPOINT": "<purchase endpoint>"
}
}
"""
import hmac
import binascii
import re
import json
import uuid
import logging
from textwrap import dedent
from datetime import datetime
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from hashlib import sha256
from django.conf import settings
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_string
from shoppingcart.models import Order
from shoppingcart.processors.exceptions import *
from shoppingcart.processors.helpers import get_processor_config
from microsite_configuration import microsite
log = logging.getLogger(__name__)
def process_postpay_callback(params):
    """
    Handle a response from the payment processor.

    Verifies the CyberSource signature on ``params``, checks whether the
    payment was accepted, and records the outcome on the associated order.

    Args:
        params (dict): Dictionary of parameters received from the payment processor.

    Returns:
        dict: {'success': bool, 'order': Order or None, 'error_html': str}
    """
    try:
        # Raises a CCProcessorException subclass on a bad signature,
        # missing/badly-typed fields, or a user-cancelled transaction.
        valid_params = verify_signatures(params)
        result = _payment_accepted(
            valid_params['req_reference_number'],
            valid_params['auth_amount'],
            valid_params['req_currency'],
            valid_params['decision']
        )
        if result['accepted']:
            # Marks the order purchased and stores the billing information.
            _record_purchase(params, result['order'])
            return {
                'success': True,
                'order': result['order'],
                'error_html': ''
            }
        else:
            # Keep the raw processor reply on the order for auditing,
            # even though the payment was declined.
            _record_payment_info(params, result['order'])
            return {
                'success': False,
                'order': result['order'],
                'error_html': _get_processor_decline_html(params)
            }
    except CCProcessorException as error:
        log.exception('error processing CyberSource postpay callback')
        # if we have the order and the id, log it
        if hasattr(error, 'order'):
            _record_payment_info(params, error.order)
        else:
            log.info(json.dumps(params))
        return {
            'success': False,
            'order': None,  # due to exception we may not have the order
            'error_html': _get_processor_exception_html(error)
        }
def processor_hash(value):
    """
    Calculate the base64-encoded, SHA-256 HMAC of ``value`` in the form
    CyberSource expects, keyed with the configured SECRET_KEY.

    Args:
        value (string): The value to sign.

    Returns:
        string
    """
    secret_key = get_processor_config().get('SECRET_KEY', '')
    digest = hmac.new(secret_key.encode('utf-8'), value.encode('utf-8'), sha256).digest()
    # b2a_base64 appends a trailing '\n' that CyberSource does not expect.
    return binascii.b2a_base64(digest)[:-1]
def verify_signatures(params):
    """
    Use the signature we receive in the POST back from CyberSource to verify
    the identity of the sender (CyberSource) and that the contents of the message
    have not been tampered with.

    Args:
        params (dictionary): The POST parameters we received from CyberSource.

    Returns:
        dict: Contains the parameters we will use elsewhere, converted to the
        appropriate types

    Raises:
        CCProcessorUserCancelled: The user cancelled the transaction.
        CCProcessorSignatureException: The calculated signature does not match
            the signature we received.
        CCProcessorDataException: The parameters we received from CyberSource were not valid
            (missing keys, wrong types)
    """
    # First see if the user cancelled the transaction.
    # If so, not all parameters will be passed back, so we can't yet verify signatures.
    if params.get('decision') == u'CANCEL':
        raise CCProcessorUserCancelled()

    # Validate the signature to ensure that the message is from CyberSource
    # and has not been tampered with.
    signed_fields = params.get('signed_field_names', '').split(',')
    data = u",".join([u"{0}={1}".format(k, params.get(k, '')) for k in signed_fields])
    returned_sig = params.get('signature', '')
    # Constant-time comparison: this endpoint receives untrusted input, and a
    # plain != would let an attacker probe the MAC one byte at a time via
    # timing differences.
    if not hmac.compare_digest(processor_hash(data), returned_sig):
        raise CCProcessorSignatureException()

    # Validate that we have the parameters we expect and can convert them
    # to the appropriate types.
    # Usually validating the signature is sufficient to validate that these
    # fields exist, but since we're relying on CyberSource to tell us
    # which fields they included in the signature, we need to be careful.
    valid_params = {}
    required_params = [
        ('req_reference_number', int),
        ('req_currency', str),
        ('decision', str),
        ('auth_amount', Decimal),
    ]
    for key, key_type in required_params:
        if key not in params:
            raise CCProcessorDataException(
                _(
                    u"The payment processor did not return a required parameter: {parameter}"
                ).format(parameter=key)
            )
        try:
            valid_params[key] = key_type(params[key])
        except (ValueError, TypeError, InvalidOperation):
            raise CCProcessorDataException(
                _(
                    u"The payment processor returned a badly-typed value {value} for parameter {parameter}."
                ).format(value=params[key], parameter=key)
            )
    return valid_params
def sign(params):
    """
    Sign the parameters dictionary so CyberSource can validate our identity.

    Stores the comma-separated, ordered list of keys in 'signed_field_names'
    and the computed HMAC in 'signature'. The iteration order of ``params``
    is significant, which is why callers pass an OrderedDict.

    Args:
        params (dict): Dictionary of parameters to sign.

    Returns:
        dict: The same dict, with 'signature' and 'signed_field_names' set.
    """
    field_names = u",".join(params.keys())
    params['signed_field_names'] = field_names
    message = u",".join(
        u"{0}={1}".format(name, params.get(name, ''))
        for name in field_names.split(',')
    )
    params['signature'] = processor_hash(message)
    return params
def render_purchase_form_html(cart, callback_url=None, extra_data=None):
    """
    Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource

    Args:
        cart (Order): The order model representing items in the user's cart.

    Keyword Args:
        callback_url (unicode): The URL that CyberSource should POST to when the user
            completes a purchase. If not provided, then CyberSource will use
            the URL provided by the administrator of the account
            (CyberSource config, not LMS config).
        extra_data (list): Additional data to include as merchant-defined data fields.

    Returns:
        unicode: The rendered HTML form.
    """
    # The template renders a hidden <form> posting the signed params to the
    # configured CyberSource endpoint.
    return render_to_string('shoppingcart/cybersource_form.html', {
        'action': get_purchase_endpoint(),
        'params': get_signed_purchase_params(
            cart, callback_url=callback_url, extra_data=extra_data
        ),
    })
def get_signed_purchase_params(cart, callback_url=None, extra_data=None):
    """
    This method will return a digitally signed set of CyberSource parameters

    Args:
        cart (Order): The order model representing items in the user's cart.

    Keyword Args:
        callback_url (unicode): The URL that CyberSource should POST to when the user
            completes a purchase. If not provided, then CyberSource will use
            the URL provided by the administrator of the account
            (CyberSource config, not LMS config).
        extra_data (list): Additional data to include as merchant-defined data fields.

    Returns:
        dict: The purchase parameters with 'signature' and
        'signed_field_names' added by sign().
    """
    return sign(get_purchase_params(cart, callback_url=callback_url, extra_data=extra_data))
def get_purchase_params(cart, callback_url=None, extra_data=None):
    """
    This method will build out a dictionary of parameters needed by CyberSource to complete the transaction

    Args:
        cart (Order): The order model representing items in the user's cart.

    Keyword Args:
        callback_url (unicode): The URL that CyberSource should POST to when the user
            completes a purchase. If not provided, then CyberSource will use
            the URL provided by the administrator of the account
            (CyberSource config, not LMS config).
        extra_data (list): Additional data to include as merchant-defined data fields.

    Returns:
        OrderedDict: Insertion order matters — sign() serializes the fields
        in this order when computing the signature.
    """
    total_cost = cart.total_cost
    amount = "{0:0.2f}".format(total_cost)
    params = OrderedDict()
    params['amount'] = amount
    params['currency'] = cart.currency
    params['orderNumber'] = "OrderId: {0:d}".format(cart.id)
    params['access_key'] = get_processor_config().get('ACCESS_KEY', '')
    params['profile_id'] = get_processor_config().get('PROFILE_ID', '')
    params['reference_number'] = cart.id
    params['transaction_type'] = 'sale'
    params['locale'] = 'en'
    params['signed_date_time'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    # This literal must list exactly the fields CyberSource should treat as
    # signed; keep it in sync with the keys set above/below.
    params['signed_field_names'] = 'access_key,profile_id,amount,currency,transaction_type,reference_number,signed_date_time,locale,transaction_uuid,signed_field_names,unsigned_field_names,orderNumber'
    params['unsigned_field_names'] = ''
    # Unique per request; lets CyberSource reject accidental duplicates.
    params['transaction_uuid'] = uuid.uuid4().hex
    params['payment_method'] = 'card'
    if callback_url is not None:
        params['override_custom_receipt_page'] = callback_url
        params['override_custom_cancel_page'] = callback_url
    if extra_data is not None:
        # CyberSource allows us to send additional data in "merchant defined data" fields
        for num, item in enumerate(extra_data, start=1):
            key = u"merchant_defined_data{num}".format(num=num)
            params[key] = item
    return params
def get_purchase_endpoint():
    """Return the configured CyberSource payment endpoint URL (unicode)."""
    config = get_processor_config()
    return config.get('PURCHASE_ENDPOINT', '')
def _payment_accepted(order_id, auth_amount, currency, decision):
    """
    Check that CyberSource has accepted the payment.

    Args:
        order_id (int): The ID of the order associated with this payment.
        auth_amount (Decimal): The amount the user paid using CyberSource.
        currency (str): The currency code of the payment.
        decision (str): "ACCEPT" if the payment was accepted.

    Returns:
        dictionary of the form:
        {
            'accepted': bool,
            'amt_charged': int,
            'currency': string,
            'order': Order
        }

    Raises:
        CCProcessorDataException: The order does not exist.
        CCProcessorWrongAmountException: The user did not pay the correct amount.
    """
    try:
        order = Order.objects.get(id=order_id)
    except Order.DoesNotExist:
        raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system."))
    if decision == 'ACCEPT':
        if auth_amount == order.total_cost and currency == order.currency:
            return {
                'accepted': True,
                'amt_charged': auth_amount,
                'currency': currency,
                'order': order
            }
        else:
            ex = CCProcessorWrongAmountException(
                _(
                    u"The amount charged by the processor {charged_amount} {charged_amount_currency} is different "
                    u"than the total cost of the order {total_cost} {total_cost_currency}."
                ).format(
                    charged_amount=auth_amount,
                    charged_amount_currency=currency,
                    total_cost=order.total_cost,
                    total_cost_currency=order.currency
                )
            )
            # Attach the order so the caller can still record payment info.
            #pylint: disable=attribute-defined-outside-init
            ex.order = order
            raise ex
    else:
        return {
            'accepted': False,
            'amt_charged': 0,
            # NOTE(review): currency is hard-coded to lowercase 'usd' on the
            # decline path even though the order may use another currency;
            # nothing was charged, but confirm no caller relies on this value.
            'currency': 'usd',
            'order': order
        }
def _record_purchase(params, order):
    """
    Record the purchase and run purchased_callbacks

    Args:
        params (dict): The parameters we received from CyberSource.
        order (Order): The order associated with this payment.

    Returns:
        None
    """
    # Usually, the credit card number will have the form "xxxxxxxx1234".
    # Parse the string to retrieve the digits.
    # If we can't find any digits, use placeholder values instead.
    ccnum_str = params.get('req_card_number', '')
    # Raw string: a bare "\d" is an invalid escape sequence in modern Python.
    match = re.search(r"\d", ccnum_str)
    if match:
        ccnum = ccnum_str[match.start():]
    else:
        ccnum = "####"
    # Mark the order as purchased and store the billing information
    order.purchase(
        first=params.get('req_bill_to_forename', ''),
        last=params.get('req_bill_to_surname', ''),
        street1=params.get('req_bill_to_address_line1', ''),
        street2=params.get('req_bill_to_address_line2', ''),
        city=params.get('req_bill_to_address_city', ''),
        state=params.get('req_bill_to_address_state', ''),
        country=params.get('req_bill_to_address_country', ''),
        postalcode=params.get('req_bill_to_address_postal_code', ''),
        ccnum=ccnum,
        cardtype=CARDTYPE_MAP[params.get('req_card_type', '')],
        processor_reply_dump=json.dumps(params)
    )
def _record_payment_info(params, order):
    """
    Store the raw processor reply on the order for later auditing.

    Called for declined/failed payments, where we still want a record of
    what CyberSource sent back.

    Args:
        params (dict): The parameters we received from CyberSource.
        order (Order): The order to annotate.

    Returns:
        None
    """
    order.processor_reply_dump = json.dumps(params)
    order.save()
def _get_processor_decline_html(params):
    """
    Return HTML indicating that the user's payment was declined.

    Args:
        params (dict): Parameters we received from CyberSource; must contain
            'decision' and 'reason_code'.

    Returns:
        unicode: The rendered HTML.
    """
    payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    return _format_error_html(
        _(
            "Sorry! Our payment processor did not accept your payment. "
            "The decision they returned was {decision}, "
            "and the reason was {reason}. "
            "You were not charged. Please try a different form of payment. "
            "Contact us with payment-related questions at {email}."
        ).format(
            decision='<span class="decision">{decision}</span>'.format(decision=params['decision']),
            reason='<span class="reason">{reason_code}:{reason_msg}</span>'.format(
                reason_code=params['reason_code'],
                # REASONCODE_MAP is a defaultdict, so unknown codes still
                # produce a human-readable fallback.
                reason_msg=REASONCODE_MAP.get(params['reason_code'])
            ),
            email=payment_support_email
        )
    )
def _get_processor_exception_html(exception):
    """
    Return HTML indicating that an error occurred.

    Args:
        exception (CCProcessorException): The exception that occurred.

    Returns:
        unicode: The rendered HTML.
    """
    payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    # Each CCProcessorException subclass gets its own user-facing message;
    # the final else is the catch-all for unexpected subclasses.
    if isinstance(exception, CCProcessorDataException):
        return _format_error_html(
            _(
                u"Sorry! Our payment processor sent us back a payment confirmation that had inconsistent data! "
                u"We apologize that we cannot verify whether the charge went through and take further action on your order. "
                u"The specific error message is: {msg} "
                u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}."
            ).format(
                msg=u'<span class="exception_msg">{msg}</span>'.format(msg=exception.message),
                email=payment_support_email
            )
        )
    elif isinstance(exception, CCProcessorWrongAmountException):
        return _format_error_html(
            _(
                u"Sorry! Due to an error your purchase was charged for a different amount than the order total! "
                u"The specific error message is: {msg}. "
                u"Your credit card has probably been charged. Contact us with payment-specific questions at {email}."
            ).format(
                msg=u'<span class="exception_msg">{msg}</span>'.format(msg=exception.message),
                email=payment_support_email
            )
        )
    elif isinstance(exception, CCProcessorSignatureException):
        return _format_error_html(
            _(
                u"Sorry! Our payment processor sent us back a corrupted message regarding your charge, so we are "
                u"unable to validate that the message actually came from the payment processor. "
                u"The specific error message is: {msg}. "
                u"We apologize that we cannot verify whether the charge went through and take further action on your order. "
                u"Your credit card may possibly have been charged. Contact us with payment-specific questions at {email}."
            ).format(
                msg=u'<span class="exception_msg">{msg}</span>'.format(msg=exception.message),
                email=payment_support_email
            )
        )
    elif isinstance(exception, CCProcessorUserCancelled):
        return _format_error_html(
            _(
                u"Sorry! Our payment processor sent us back a message saying that you have cancelled this transaction. "
                u"The items in your shopping cart will exist for future purchase. "
                u"If you feel that this is in error, please contact us with payment-specific questions at {email}."
            ).format(
                email=payment_support_email
            )
        )
    else:
        return _format_error_html(
            _(
                u"Sorry! Your payment could not be processed because an unexpected exception occurred. "
                u"Please contact us at {email} for assistance."
            ).format(email=payment_support_email)
        )
def _format_error_html(msg):
""" Format an HTML error message """
return u'<p class="error_msg">{msg}</p>'.format(msg=msg)
# Map CyberSource numeric card-type codes to display names. Lookups of
# unrecognized codes fall back to "UNKNOWN" via the defaultdict factory.
CARDTYPE_MAP = defaultdict(
    lambda: "UNKNOWN",
    {
        '001': 'Visa',
        '002': 'MasterCard',
        '003': 'American Express',
        '004': 'Discover',
        '005': 'Diners Club',
        '006': 'Carte Blanche',
        '007': 'JCB',
        '014': 'EnRoute',
        '021': 'JAL',
        '024': 'Maestro',
        '031': 'Delta',
        '033': 'Visa Electron',
        '034': 'Dankort',
        '035': 'Laser',
        '036': 'Carte Bleue',
        '037': 'Carta Si',
        '042': 'Maestro Int.',
        '043': 'GE Money UK card',
    },
)
REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON")
REASONCODE_MAP.update(
{
'100': _('Successful transaction.'),
'102': _('One or more fields in the request contains invalid data.'),
'104': dedent(_(
"""
The access_key and transaction_uuid fields for this authorization request matches the access_key and
transaction_uuid of another authorization request that you sent in the last 15 minutes.
Possible fix: retry the payment after 15 minutes.
""")),
'110': _('Only a partial amount was approved.'),
'200': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by CyberSource
because it did not pass the Address Verification System (AVS).
""")),
'201': dedent(_(
"""
The issuing bank has questions about the request. You do not receive an
authorization code programmatically, but you might receive one verbally by calling the processor.
Possible fix: retry with another form of payment
""")),
'202': dedent(_(
"""
Expired card. You might also receive this if the expiration date you
provided does not match the date the issuing bank has on file.
Possible fix: retry with another form of payment
""")),
'203': dedent(_(
"""
General decline of the card. No other information provided by the issuing bank.
Possible fix: retry with another form of payment
""")),
'204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'),
# 205 was Stolen or lost card. Might as well not show this message to the person using such a card.
'205': _('Stolen or lost card'),
'207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'),
'208': dedent(_(
"""
Inactive card or card not authorized for card-not-present transactions.
Possible fix: retry with another form of payment
""")),
'210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'),
'211': _('Invalid card verification number (CVN). Possible fix: retry with another form of payment'),
# 221 was The customer matched an entry on the processor's negative file.
# Might as well not show this message to the person using such a card.
'221': _('The customer matched an entry on the processors negative file.'),
'222': _('Account frozen. Possible fix: retry with another form of payment'),
'230': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by
CyberSource because it did not pass the CVN check.
Possible fix: retry with another form of payment
""")),
'231': _('Invalid account number. Possible fix: retry with another form of payment'),
'232': dedent(_(
"""
The card type is not accepted by the payment processor.
Possible fix: retry with another form of payment
""")),
'233': _('General decline by the processor. Possible fix: retry with another form of payment'),
'234': dedent(_(
"""
There is a problem with the information in your CyberSource account. Please let us know at {0}
""".format(settings.PAYMENT_SUPPORT_EMAIL))),
'236': _('Processor Failure. Possible fix: retry the payment'),
'240': dedent(_(
"""
The card type sent is invalid or does not correlate with the credit card number.
Possible fix: retry with the same card or another form of payment
""")),
'475': _('The cardholder is enrolled for payer authentication'),
'476': _('Payer authentication could not be authenticated'),
'520': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by CyberSource based
on your legacy Smart Authorization settings.
Possible fix: retry with a different form of payment.
""")),
}
)
| agpl-3.0 |
das7pad/hangups | hangups/test/test_pblite.py | 3 | 5029 | """Tests for pblite format decoder and encoder."""
from hangups import pblite
from hangups.test import test_pblite_pb2
###############################################################################
# pblite.decode
###############################################################################
def test_decode():
    """Decoding a full pblite list populates every supported field type."""
    msg = test_pblite_pb2.TestMessage()
    serialized = [
        1,
        [3, 4],
        'foo',
        ['bar', 'baz'],
        1,
        [2, 3],
        [1],
        [[2], [3]],
        'AA==',
        ['AAE=', 'AAEC'],
    ]
    pblite.decode(msg, serialized)
    embedded = test_pblite_pb2.TestMessage.EmbeddedMessage
    expected = test_pblite_pb2.TestMessage(
        test_int=1,
        test_repeated_int=[3, 4],
        test_string='foo',
        test_repeated_string=['bar', 'baz'],
        test_enum=test_pblite_pb2.TestMessage.TEST_1,
        test_repeated_enum=[test_pblite_pb2.TestMessage.TEST_2,
                            test_pblite_pb2.TestMessage.TEST_3],
        test_embedded_message=embedded(test_embedded_int=1),
        test_repeated_embedded_message=[
            embedded(test_embedded_int=2),
            embedded(test_embedded_int=3),
        ],
        test_bytes=b'\x00',
        test_repeated_bytes=[b'\x00\x01', b'\x00\x01\x02'],
    )
    assert msg == expected
def test_decode_unserialized_fields():
    """Fields serialized as None are simply left unset."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None, None, 'foo'])
    assert msg == test_pblite_pb2.TestMessage(test_string='foo')
def test_decode_unknown_field():
    """A value at a field number the message doesn't define is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 99 + [1])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_unknown_enum():
    """An enum value outside the declared range is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 4 + [99])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_unknown_repeated_enum():
    """A repeated enum containing any unknown value is ignored entirely."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 5 + [[1, 99]])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_scalar_wrong_type():
    """A scalar of the wrong type is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, ['foo'])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_repeated_scalar_wrong_type():
    """A repeated scalar with any wrongly-typed item is ignored entirely."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None, [1, 'foo', 2]])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_message_wrong_type():
    """A non-list value for an embedded message field is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 6 + [1])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_repeated_message_wrong_type():
    """A wrongly-typed item in a repeated message field still yields an
    empty embedded message at that position."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 7 + [[1]])
    expected = test_pblite_pb2.TestMessage(
        test_repeated_embedded_message=[
            test_pblite_pb2.TestMessage.EmbeddedMessage(),
        ],
    )
    assert msg == expected
def test_decode_bytes_wrong_type():
    """A non-string value for a bytes field is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 8 + [1])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_bytes_invalid_value():
    """A string that is not valid base64 for a bytes field is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 8 + ['A?=='])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_repeated_bytes_wrong_type():
    """A repeated bytes field with a non-string item is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 9 + [[1]])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_repeated_bytes_invalid_value():
    """A repeated bytes field with an invalid base64 item is ignored."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [None] * 9 + [['A?==']])
    assert msg == test_pblite_pb2.TestMessage()
def test_decode_ignore_first_item():
    """With ignore_first_item=True the leading element is skipped, so the
    remaining items map to field numbers starting at 1."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, ['ignored', 1, [3, 4]], ignore_first_item=True)
    assert msg == test_pblite_pb2.TestMessage(
        test_int=1,
        test_repeated_int=[3, 4],
    )
def test_decode_dict():
    """A dict maps string field numbers to values (pblite's sparse form)."""
    msg = test_pblite_pb2.TestMessage()
    pblite.decode(msg, [1, {'7': [2]}])
    assert msg == test_pblite_pb2.TestMessage(
        test_int=1,
        test_embedded_message=test_pblite_pb2.TestMessage.EmbeddedMessage(
            test_embedded_int=2,
        ),
    )
| mit |
bbc/kamaelia | Sketches/MH/Layout/Physics/SpatialIndexer.py | 3 | 6248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from operator import mul as _mul
from operator import sub as _sub
class SpatialIndexer(object):
    """Fast spatial lookups of entities.

    Quickly find all entities within a given radius of a set of coordinates.
    Entities are bucketed into a grid of square cells; the cell size
    (``proxDist``) should match the most commonly used proximity distance,
    as it affects the speed of lookups and the size of the internal data
    structure.

    Entities must provide a getLoc() method that returns their coordinates
    as a tuple. To first register entities, or whenever they change
    coordinates, call updateLoc(<entities>). When entities are removed,
    call remove(<entities>).
    """

    def __init__(self, proxDist=1.0):
        """proxDist -- grid cell size; must be strictly positive."""
        if proxDist <= 0.0:
            raise ValueError
        self.cellSize = proxDist
        self.cells = {}     # cellCoord -> list of entities in that cell
        self.entities = {}  # entity -> cellCoord it is currently filed under

    def _coord2cell(self, *coords):
        """Map spatial coordinates to the integer coordinates of their cell."""
        return tuple([int(coord // self.cellSize) for coord in coords])

    def updateAll(self):
        """Re-file every registered entity (after bulk coordinate changes)."""
        # list() so a live key view is not iterated while values are updated.
        self.updateLoc(*list(self.entities.keys()))

    def updateLoc(self, *entities):
        """Add new entit(ies), or notify of a position change of entit(ies)."""
        for entity in entities:
            oldCell = self.entities.get(entity)  # None if not yet registered
            newCell = self._coord2cell(*entity.getLoc())
            if newCell != oldCell:
                # Cell coords are tuples, so None can only mean "new entity".
                if oldCell is not None:
                    self.cells[oldCell].remove(entity)
                self.cells.setdefault(newCell, []).append(entity)
                self.entities[entity] = newCell

    add = updateLoc

    def remove(self, *entities):
        """Notify that entit(ies) no longer exist (remove them)"""
        for entity in entities:
            # 'in' replaces dict.has_key(), which was removed in Python 3.
            if entity in self.entities:
                self.cells[self.entities[entity]].remove(entity)
                del self.entities[entity]

    def withinRadius(self, centre, radius, filter=(lambda particle: True)):
        """Returns a list of zero or more (particle, distSquared) tuples,
        representing those particles within radius distance of the
        specified centre coords.

        distance-squared from the centre coords is returned too to negate
        any need you may have to calculate it again yourself.

        You can specify a filter function that takes a candidate particle
        as an argument and should return True if it is to be included
        (if it is within the radius, of course). This is to allow efficient
        pre-filtering of the particles before the distance test is done.
        """
        # Bounding box (in cell coordinates) of all cells the sphere touches.
        lbound = [int((coord - radius) // self.cellSize) for coord in centre]
        ubound = [int((coord + radius) // self.cellSize) for coord in centre]
        rsquared = radius * radius
        inRange = []
        cell = lbound[:]
        inc = 0
        while inc == 0:
            # Examine all entities filed under this cell, if any.
            for entity in self.cells.get(tuple(cell), []):
                if filter(entity):
                    entcoord = entity.getLoc()
                    # zip() pairs the axes explicitly; the previous
                    # map()-based version consumed one iterator twice, which
                    # silently mis-paired terms under Python 3.
                    distsquared = sum((c - e) * (c - e)
                                      for c, e in zip(centre, entcoord))
                    if distsquared <= rsquared:
                        inRange.append((entity, distsquared))
            # Advance to the next cell, carrying each axis past its upper
            # bound; when the last axis carries, every cell has been visited.
            inc = 1
            for i in range(0, len(cell)):
                cell[i] += inc
                if cell[i] > ubound[i]:
                    cell[i] = lbound[i]
                    inc = 1
                else:
                    inc = 0
        return inRange
if __name__ == "__main__":
    # Manual smoke test / demonstration (Python 2 print statements).
    x = SpatialIndexer()
    # Nothing registered yet, so nothing can be in range.
    print x.withinRadius( (0,0), 1.0)
    class Entity(object):
        def __init__(self, coords):
            self.coords = coords
        def getLoc(self):
            return self.coords
    a = Entity((0.1, 0.2))
    b = Entity((1.2, 3.4))
    x.add(a,b)
    # Only 'a' lies within 1.0 of the origin.
    print x.withinRadius( (0,0), 1.0)
    print
    # Radius 0.1 is too small to reach either entity.
    print x.withinRadius( (0,0), 0.1)
    print
    # Radius 5 covers both entities.
    print x.withinRadius( (0,0), 5)
    print
    x.remove(a)
    # After removal only 'b' should be reported.
    print x.withinRadius( (0,0), 5)
| apache-2.0 |
amiller87/puracore | contrib/devtools/security-check.py | 72 | 6473 | #!/usr/bin/python2
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if succesful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function
import subprocess
import sys
import os
# Paths of the binutils tools used for inspection; overridable via the
# READELF and OBJDUMP environment variables.
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
def check_ELF_PIE(executable):
    '''
    Check for position independent executable (PIE), allowing for address
    space randomization.
    '''
    p = subprocess.Popen([READELF_CMD, '-h', '-W', executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    # PIE binaries report the shared-object type 'DYN' in the ELF header.
    for line in stdout.split('\n'):
        tokens = line.split()
        if len(tokens) >= 2 and tokens[0] == 'Type:' and tokens[1] == 'DYN':
            return True
    return False
def get_ELF_program_headers(executable):
    '''Return type and flags for ELF program headers'''
    p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    in_headers = False
    count = 0
    headers = []
    for line in stdout.split('\n'):
        if line.startswith('Program Headers:'):
            in_headers = True
        if line == '':
            # A blank line terminates the program-headers table.
            in_headers = False
        if in_headers:
            if count == 1: # header line
                # Record the column offsets of the fields of interest so the
                # values can be sliced out of the fixed-width table rows.
                ofs_typ = line.find('Type')
                ofs_offset = line.find('Offset')
                ofs_flags = line.find('Flg')
                ofs_align = line.find('Align')
                if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse elfread -lW output')
            elif count > 1:
                # Data row: slice the Type and Flg columns positionally.
                typ = line[ofs_typ:ofs_offset].rstrip()
                flags = line[ofs_flags:ofs_align].rstrip()
                headers.append((typ, flags))
            count += 1
    return headers
def check_ELF_NX(executable):
    '''
    Check that no sections are writable and executable (including the stack)
    '''
    headers = get_ELF_program_headers(executable)
    # The stack is only non-executable when an explicit GNU_STACK header
    # is present; any W+E segment defeats NX regardless.
    have_gnu_stack = any(typ == 'GNU_STACK' for (typ, flags) in headers)
    have_wx = any('W' in flags and 'E' in flags for (typ, flags) in headers)
    return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
    '''
    Check for read-only relocations.
    GNU_RELRO program header must exist
    Dynamic section must have BIND_NOW flag
    '''
    have_gnu_relro = False
    for (typ, flags) in get_ELF_program_headers(executable):
        # Note: not checking flags == 'R': here as linkers set the permission differently
        # This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker need to write to this area so these are RW.
        # Glibc itself takes care of mprotecting this area R after relocations are finished.
        # See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
        if typ == 'GNU_RELRO':
            have_gnu_relro = True
    have_bindnow = False
    p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    for line in stdout.split('\n'):
        tokens = line.split()
        # Accept either a standalone "(BIND_NOW)" tag or BIND_NOW listed
        # inside the "(FLAGS)" entry of the dynamic section.
        if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]):
            have_bindnow = True
    return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
    '''
    Check for use of stack canary
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    # Binaries built with stack protection reference __stack_chk_fail.
    return any('__stack_chk_fail' in line for line in stdout.split('\n'))
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits
    '''
    proc = subprocess.Popen([OBJDUMP_CMD, '-x', executable],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    (out, err) = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    # objdump -x prints a "DllCharacteristics <hex>" line for PE files.
    for line in out.split('\n'):
        fields = line.split()
        if len(fields) >= 2 and fields[0] == 'DllCharacteristics':
            return int(fields[1], 16)
    return 0
def check_PE_PIE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    return (get_PE_dll_characteristics(executable) & 0x40) != 0
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    return (get_PE_dll_characteristics(executable) & 0x100) != 0
# Maps executable format -> list of (check name, check function) pairs that
# the main loop runs against each input file.
CHECKS = {
'ELF': [
    ('PIE', check_ELF_PIE),
    ('NX', check_ELF_NX),
    ('RELRO', check_ELF_RELRO),
    ('Canary', check_ELF_Canary)
],
'PE': [
    ('PIE', check_PE_PIE),
    ('NX', check_PE_NX)
]
}
def identify_executable(executable):
    '''
    Identify the executable format from its leading magic bytes.
    Returns 'PE', 'ELF', or None for an unrecognized format.
    '''
    # Bug fix: this previously opened the global `filename` instead of the
    # `executable` argument, which only worked by accident when called from
    # the __main__ loop below.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
    # Check every executable named on the command line. Exit status is 1 if
    # any file fails a check, has an unknown format, or cannot be opened;
    # 0 (and no output) on full success.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('%s: unknown format' % filename)
                retval = 1
                continue
            failed = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    failed.append(name)
            if failed:
                print('%s: failed %s' % (filename, ' '.join(failed)))
                retval = 1
        except IOError:
            # Raised by the check helpers when the tool invocation fails.
            print('%s: cannot open' % filename)
            retval = 1
    exit(retval)
| mit |
grimoirelab/perceval | tests/mocked_package/nested_package/nested_backend_b.py | 1 | 1242 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>..
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
from perceval.backend import (Backend,
BackendCommand)
class BackendB(Backend):
    """Mocked backend class used for testing"""

    def __init__(self, origin, tag=None, archive=None):
        # Forward construction arguments unchanged to the perceval Backend base.
        super().__init__(origin, tag=tag, archive=archive)
class BackendCommandB(BackendCommand):
    """Mocked backend command class used for testing"""

    # Backend class this command drives (read by the BackendCommand machinery).
    BACKEND = BackendB

    def __init__(self, *args):
        super().__init__(*args)
| gpl-3.0 |
JakeBrand/CMPUT410-E6 | bookmarks/main/migrations/0001_initial.py | 11 | 1161 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the 'main' app: Link and Tag models plus the
    Link<->Tag many-to-many relation."""

    # First migration of the app, so there are no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(unique=True, max_length=128)),
                ('url', models.URLField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=128)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after both models exist so the M2M can reference main.Tag.
        migrations.AddField(
            model_name='link',
            name='tags',
            field=models.ManyToManyField(to='main.Tag'),
            preserve_default=True,
        ),
    ]
| apache-2.0 |
s20121035/rk3288_android5.1_repo | external/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py | 13 | 3092 | # Test normal conditional branches in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffd8 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 8 bytes if it uses a short branch
# and 10 if it uses a long one. The ones before "main:" have to take the branch
# length into account -- which is 4 bytes for short branches -- so the final
# (0x28 - 4) / 8 == 4 blocks can use short branches. The ones after "main:"
# do not, so the first 0x28 / 8 == 5 can use short branches. However,
# the conservative algorithm we use makes one branch unnecessarily long
# on each side.
#
# CHECK: c %r4, 0(%r3)
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 4(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 8(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 12(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 16(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 20(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 24(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 28(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 32(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 36(%r3)
# CHECK: je [[LABEL]]
# ...main goes here...
# CHECK: c %r4, 100(%r3)
# CHECK: je [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 104(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 108(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 112(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 116(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 120(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 124(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 128(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 132(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 136(%r3)
# CHECK: jge [[LABEL]]
# Number of conditional-branch blocks emitted before and after "main".
branch_blocks = 10
# Byte size of the filler in "main" (MVIY stores); chosen so some of the
# surrounding conditional branches fall out of short-branch range.
main_size = 0xffd8

# Python 2 script: emits the LLVM IR for the test on stdout.
print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
    # Each "before" block compares %limit to the next stop slot and jumps
    # forward to after0 on equality, otherwise falls through towards main.
    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
    print 'before%d:' % i
    print ' %%bstop%d = getelementptr i32 *%%stop, i64 %d' % (i, i)
    print ' %%bcur%d = load i32 *%%bstop%d' % (i, i)
    print ' %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i)
    print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
    print ''
print '%s:' % next
# Pad "main" with volatile byte stores; Fibonacci-derived offsets/values
# keep the emitted instructions varied but deterministic.
a, b = 1, 1
for i in xrange(0, main_size, 6):
    a, b = b, a + b
    offset = 4096 + b % 500000
    value = a % 256
    print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset)
    print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
    # Each "after" block branches backwards to main on equality.
    print ' %%astop%d = getelementptr i32 *%%stop, i64 %d' % (i, i + 25)
    print ' %%acur%d = load i32 *%%astop%d' % (i, i)
    print ' %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
    print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
    print ''
    print 'after%d:' % i
print ' ret void'
print '}'
| gpl-3.0 |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/Mezzanine-3.1.10-py2.7.egg/mezzanine/boot/lazy_admin.py | 26 | 3063 | from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib.auth import get_user_model
from django.contrib.admin.sites import AdminSite, NotRegistered
from mezzanine.utils.importing import import_dotted_path
class LazyAdminSite(AdminSite):
    """
    Defers calls to register/unregister until autodiscover is called
    to avoid load issues with injectable model fields defined by
    ``settings.EXTRA_MODEL_FIELDS``.
    """

    def __init__(self, *args, **kwargs):
        # Queue of ("register"|"unregister", args, kwargs) calls replayed by
        # lazy_registration() once autodiscover has run.
        self._deferred = []
        super(LazyAdminSite, self).__init__(*args, **kwargs)

    def register(self, *args, **kwargs):
        # If this model was previously lazily unregistered, the re-register
        # must also be deferred so it replays after the unregister;
        # otherwise it is safe to register immediately.
        for name, deferred_args, deferred_kwargs in self._deferred:
            if name == "unregister" and deferred_args[0] == args[0]:
                self._deferred.append(("register", args, kwargs))
                break
        else:
            super(LazyAdminSite, self).register(*args, **kwargs)

    def unregister(self, *args, **kwargs):
        # Always deferred: the model may not have been registered yet.
        self._deferred.append(("unregister", args, kwargs))

    def lazy_registration(self):
        # First, directly handle models we don't want at all,
        # as per the ``ADMIN_REMOVAL`` setting.
        for model in getattr(settings, "ADMIN_REMOVAL", []):
            try:
                model = tuple(model.rsplit(".", 1))
                # NOTE(review): exec/eval on values from settings.ADMIN_REMOVAL;
                # settings are trusted config, but worth keeping in mind.
                exec("from %s import %s" % model)
            except ImportError:
                pass
            else:
                try:
                    AdminSite.unregister(self, eval(model[1]))
                except NotRegistered:
                    pass
        # Replay the deferred register/unregister calls in original order.
        for name, deferred_args, deferred_kwargs in self._deferred:
            getattr(AdminSite, name)(self, *deferred_args, **deferred_kwargs)

    @property
    def urls(self):
        urls = patterns("", ("", super(LazyAdminSite, self).urls),)
        # Filebrowser admin media library.
        fb_name = getattr(settings, "PACKAGE_NAME_FILEBROWSER", "")
        if fb_name in settings.INSTALLED_APPS:
            try:
                fb_urls = import_dotted_path("%s.sites.site" % fb_name).urls
            except ImportError:
                fb_urls = "%s.urls" % fb_name
            urls = patterns("", ("^media-library/", include(fb_urls)),) + urls
        # Give the urlpattern for the user password change view an
        # actual name, so that it can be reversed when multiple
        # languages are supported in the admin.
        User = get_user_model()
        for admin in self._registry.values():
            user_change_password = getattr(admin, "user_change_password", None)
            if user_change_password:
                bits = (User._meta.app_label, User._meta.object_name.lower())
                urls = patterns("",
                    url("^%s/%s/(\d+)/password/$" % bits,
                        self.admin_view(user_change_password),
                        name="user_change_password"),
                ) + urls
                break
        return urls
| gpl-2.0 |
thodoris/djangoPharma | djangoPharma/env/Lib/site-packages/django/core/management/utils.py | 67 | 3739 | from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.apps import apps as installed_apps
from django.utils import six
from django.utils.crypto import get_random_string
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding='utf-8'):
    """
    Friendly wrapper around Popen.

    Returns stdout output, stderr output and OS status code.
    """
    try:
        p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')
    except OSError as e:
        strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING, strings_only=True)
        # Re-raise as the caller-chosen exception type while preserving the
        # original traceback (six.reraise handles Python 2/3 differences).
        six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' %
                    (args[0], strerror)), sys.exc_info()[2])
    output, errors = p.communicate()
    return (
        force_text(output, stdout_encoding, strings_only=True, errors='strict'),
        # stderr is decoded leniently with the locale encoding: tool error
        # output is not guaranteed to be valid in stdout_encoding.
        force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True, errors='replace'),
        p.returncode
    )
def handle_extensions(extensions):
    """
    Organizes multiple extensions that are separated with commas or passed by
    using --extension/-e multiple times.

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml']

    >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
    {'.html', '.js', '.py'}
    >>> handle_extensions(['.html, txt,.tpl'])
    {'.html', '.tpl', '.txt'}
    """
    # Flatten comma-separated groups, dropping any embedded spaces.
    ext_list = []
    for group in extensions:
        ext_list.extend(group.replace(' ', '').split(','))
    # Normalize each entry to start with a dot; the set removes duplicates.
    return {ext if ext.startswith('.') else '.%s' % ext for ext in ext_list}
def find_command(cmd, path=None, pathext=None):
    """Return the full path to `cmd` found on `path`, or None.

    Behaves like a simplified, Windows-aware `which`: `pathext` lists the
    executable extensions (e.g. .EXE) to try when `cmd` carries none.
    """
    if path is None:
        path = os.environ.get('PATH', '').split(os.pathsep)
    # Accept a single directory string as well as a list of directories.
    if isinstance(path, six.string_types):
        path = [path]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # don't use extensions if the command ends with one of them
    for ext in pathext:
        if cmd.endswith(ext):
            pathext = ['']
            break
    # check if we find the command on PATH
    for p in path:
        f = os.path.join(p, cmd)
        if os.path.isfile(f):
            return f
        for ext in pathext:
            fext = f + ext
            if os.path.isfile(fext):
                return fext
    return None
def get_random_secret_key():
    """
    Return a 50 character random string usable as a SECRET_KEY setting value.
    """
    # Alphabet deliberately omits quotes and backslashes, presumably so the
    # generated key can be pasted into a settings file verbatim -- verify.
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, chars)
def parse_apps_and_model_labels(labels):
    """
    Parse a list of "app_label.ModelName" or "app_label" strings into actual
    objects and return a two-element tuple:
        (set of model classes, set of app_configs).
    Raise a CommandError if some specified models or apps don't exist.
    """
    apps = set()
    models = set()
    for label in labels:
        if '.' in label:
            # "app_label.ModelName" -> a specific model class.
            try:
                model = installed_apps.get_model(label)
            except LookupError:
                raise CommandError('Unknown model: %s' % label)
            models.add(model)
        else:
            # Bare "app_label" -> the whole application config.
            try:
                app_config = installed_apps.get_app_config(label)
            except LookupError as e:
                raise CommandError(str(e))
            apps.add(app_config)
    return models, apps
| apache-2.0 |
adematte/ansible-modules-extras | network/f5/bigip_monitor_tcp.py | 33 | 16829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author: Serge van Ginderachter
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
type:
description:
- The template type of this monitor template
required: false
default: 'tcp'
choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN']
parent:
description:
- The parent template of this monitor template
required: false
default: 'tcp'
choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
- port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create TCP Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-tcp
- name: BIGIP F5 | Create TCP half open Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-halftcp
- name: BIGIP F5 | Remove TCP Monitor
local_action:
module: bigip_monitor_tcp
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
with_flattened:
- f5monitors-tcp
- f5monitors-halftcp
'''
# bigsuds (F5's iControl client) is an optional dependency; record its
# absence so the module can fail with a clean message rather than a traceback.
try:
    import bigsuds
except ImportError:
    bigsuds_found = False
else:
    bigsuds_found = True

# iControl template type managed by this module, the user-facing parent
# template choices, and the derived defaults ('TTYPE_TCP' -> 'tcp').
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
# ===========================================
# bigip_monitor module generic methods.
# these should be re-useable for other monitor types
#
def bigip_api(bigip, user, password):
    """Return a bigsuds iControl connection to the given BIG-IP host."""
    return bigsuds.BIGIP(hostname=bigip, username=user, password=password)
def check_monitor_exists(module, api, monitor, parent):
    """Return True if `monitor` exists with the expected type and parent.

    Fails the module if a monitor of the same name exists but with a
    different template type or parent template.
    """
    # hack to determine if monitor exists: probe the template and treat a
    # "was not found" fault as absence.
    result = False
    try:
        ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
        parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
        if ttype == TEMPLATE_TYPE and parent == parent2:
            result = True
        else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
    # 'as' form replaces the Python-2-only "except E, e" syntax (PEP 3110).
    except bigsuds.OperationFailed as e:
        if "was not found" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result
def create_monitor(api, monitor, template_attributes):
    """Create the monitor template; return False if it already existed."""
    try:
        api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
    # 'as' form replaces the Python-2-only "except E, e" syntax (PEP 3110).
    except bigsuds.OperationFailed as e:
        if "already exists" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def delete_monitor(api, monitor):
    """Delete the monitor template; return False if it was already gone."""
    try:
        api.LocalLB.Monitor.delete_template(template_names=[monitor])
    # 'as' form replaces the Python-2-only "except E, e" syntax (PEP 3110).
    except bigsuds.OperationFailed as e:
        # maybe it was deleted since we checked
        if "was not found" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def check_string_property(api, monitor, str_property):
    """Return True if the monitor already carries the given string property.

    `str_property` is a MonitorStringValue-style dict: {'type': ..., 'value': ...}.
    Also returns True when the template does not exist yet (happens in check
    mode before creation), so no spurious update is reported.
    """
    try:
        return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
    except bigsuds.OperationFailed as e:  # "as" syntax: valid on Python 2.6+ and 3.x
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    # NOTE: the original trailing `return True` after the try/except was
    # unreachable (both branches return or raise) and has been removed.
def set_string_property(api, monitor, str_property):
    """Set one string property (MonitorStringValue dict) on the monitor template."""
    api.LocalLB.Monitor.set_template_string_property(
        template_names=[monitor],
        values=[str_property])
def check_integer_property(api, monitor, int_property):
    """Return True if the monitor already carries the given integer property.

    `int_property` is a MonitorIntegerValue-style dict: {'type': ..., 'value': ...}.
    Also returns True when the template does not exist yet (happens in check
    mode before creation), so no spurious update is reported.
    """
    try:
        return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
    except bigsuds.OperationFailed as e:  # "as" syntax: valid on Python 2.6+ and 3.x
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    # NOTE: the original trailing `return True` after the try/except was
    # unreachable (both branches return or raise) and has been removed.
def set_integer_property(api, monitor, int_property):
    """Set one integer property (MonitorIntegerValue dict) on the monitor template."""
    # BUGFIX: the iControl LocalLB::Monitor API method is
    # set_template_integer_property; "set_template_int_property" does not
    # exist and raised at call time whenever an integer property needed
    # updating on an existing monitor.
    api.LocalLB.Monitor.set_template_integer_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
    """Push any differing string/integer properties to the monitor template.

    Properties whose 'value' is None are left untouched.  Returns True if
    anything was (or, in check mode, would have been) changed.
    """
    changed = False
    for prop in template_string_properties:
        if prop['value'] is None:
            continue
        if check_string_property(api, monitor, prop):
            continue
        if not module.check_mode:
            set_string_property(api, monitor, prop)
        changed = True
    for prop in template_integer_properties:
        if prop['value'] is None:
            continue
        if check_integer_property(api, monitor, prop):
            continue
        if not module.check_mode:
            set_integer_property(api, monitor, prop)
        changed = True
    return changed
def get_ipport(api, monitor):
    """Return the monitor's current destination (address type plus ip/port)."""
    destinations = api.LocalLB.Monitor.get_template_destination(template_names=[monitor])
    return destinations[0]
def set_ipport(api, monitor, ipport):
    """Set the monitor destination; return (success, error_message).

    The address type of a monitor that is already assigned to a pool cannot
    be changed — that case is reported as (False, message) rather than raised.
    """
    try:
        api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
        return True, ""
    except bigsuds.OperationFailed as e:  # "as" syntax: valid on Python 2.6+ and 3.x
        if "Cannot modify the address type of monitor" in str(e):
            return False, "Cannot modify the address type of monitor if already assigned to a pool."
        else:
            # genuine exception
            raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
    """Entry point: ensure the requested BIG-IP TCP monitor is present/absent.

    Reads module parameters, creates/deletes the monitor template as needed,
    and updates its properties and destination, honoring check mode.
    """
    # begin monitor specific stuff
    module = AnsibleModule(
        argument_spec = dict(
            server = dict(required=True),
            user = dict(required=True),
            password = dict(required=True),
            partition = dict(default='Common'),
            state = dict(default='present', choices=['present', 'absent']),
            name = dict(required=True),
            type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
            parent = dict(default=DEFAULT_PARENT),
            parent_partition = dict(default='Common'),
            send = dict(required=False),
            receive = dict(required=False),
            ip = dict(required=False),
            port = dict(required=False, type='int'),
            interval = dict(required=False, type='int'),
            timeout = dict(required=False, type='int'),
            time_until_up = dict(required=False, type='int', default=0)
        ),
        supports_check_mode=True
    )
    server = module.params['server']
    user = module.params['user']
    password = module.params['password']
    partition = module.params['partition']
    parent_partition = module.params['parent_partition']
    state = module.params['state']
    name = module.params['name']
    # renamed from `type` to avoid shadowing the builtin
    mon_type = 'TTYPE_' + module.params['type'].upper()
    parent = "/%s/%s" % (parent_partition, module.params['parent'])
    monitor = "/%s/%s" % (partition, name)
    send = module.params['send']
    receive = module.params['receive']
    ip = module.params['ip']
    port = module.params['port']
    interval = module.params['interval']
    timeout = module.params['timeout']
    time_until_up = module.params['time_until_up']

    # tcp monitor has multiple types, so overrule the module-level default
    global TEMPLATE_TYPE
    TEMPLATE_TYPE = mon_type

    # end monitor specific stuff

    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")
    api = bigip_api(server, user, password)
    monitor_exists = check_monitor_exists(module, api, monitor, parent)

    # ipport is a special setting
    if monitor_exists:  # make sure to not update current settings if not asked
        cur_ipport = get_ipport(api, monitor)
        if ip is None:
            ip = cur_ipport['ipport']['address']
        if port is None:
            port = cur_ipport['ipport']['port']
    else:  # use API defaults if not defined to create it
        if interval is None:
            interval = 5
        if timeout is None:
            timeout = 16
        if ip is None:
            ip = '0.0.0.0'
        if port is None:
            port = 0
        if send is None:
            send = ''
        if receive is None:
            receive = ''

    # define and set address type from the ip/port wildcard combination
    if ip == '0.0.0.0' and port == 0:
        address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
    elif ip == '0.0.0.0' and port != 0:
        address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
    elif ip != '0.0.0.0' and port != 0:
        address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
    else:
        address_type = 'ATYPE_UNSET'

    ipport = {'address_type': address_type,
              'ipport': {'address': ip,
                         'port': port}}

    template_attributes = {'parent_template': parent,
                           'interval': interval,
                           'timeout': timeout,
                           'dest_ipport': ipport,
                           'is_read_only': False,
                           'is_directly_usable': True}

    # monitor specific stuff: send/receive only apply to the plain tcp type
    if mon_type == 'TTYPE_TCP':
        template_string_properties = [{'type': 'STYPE_SEND',
                                       'value': send},
                                      {'type': 'STYPE_RECEIVE',
                                       'value': receive}]
    else:
        template_string_properties = []

    template_integer_properties = [{'type': 'ITYPE_INTERVAL',
                                    'value': interval},
                                   {'type': 'ITYPE_TIMEOUT',
                                    'value': timeout},
                                   {'type': 'ITYPE_TIME_UNTIL_UP',
                                    # BUGFIX: was `interval`, which silently
                                    # ignored the time_until_up parameter
                                    'value': time_until_up}]

    # main logic, monitor generic
    try:
        result = {'changed': False}  # default

        if state == 'absent':
            if monitor_exists:
                if not module.check_mode:
                    # possible race condition if same task
                    # on other node deleted it first
                    result['changed'] |= delete_monitor(api, monitor)
                else:
                    result['changed'] |= True
        else:  # state present
            ## check for monitor itself
            if not monitor_exists:  # create it
                if not module.check_mode:
                    # again, check changed status here b/c race conditions
                    # if other task already created it
                    result['changed'] |= create_monitor(api, monitor, template_attributes)
                else:
                    result['changed'] |= True

            ## check for monitor parameters
            # whether it already existed, or was just created, now update
            # the update functions need to check for check mode but
            # cannot update settings if it doesn't exist which happens in check mode
            if monitor_exists and not module.check_mode:
                result['changed'] |= update_monitor_properties(api, module, monitor,
                                                               template_string_properties,
                                                               template_integer_properties)
            # else assume nothing changed

            # we just have to update the ipport if monitor already exists and it's different
            if monitor_exists and cur_ipport != ipport:
                set_ipport(api, monitor, ipport)
                result['changed'] |= True
            #else: monitor doesn't exist (check mode) or ipport is already ok

    except Exception as e:  # "as" syntax: valid on Python 2.6+ and 3.x
        module.fail_json(msg="received exception: %s" % e)

    module.exit_json(**result)
# import module snippets
# (Ansible replaces this wildcard import at build time with the
# module_utils boilerplate; it provides AnsibleModule used in main().)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.